def read_electrics(e_path, e_site, samp, hi, low):
    """ Read measured electric time series
		
		Parameters
		-----------
		e_path = Folder with electric field time series
         e_site = Name of the site to compute electric fields
         samp = sampling rate (in seconds)
         hi = Maximum period to analyse (seconds)
         low = Minimum period to analyse (seconds)


		Returns
		-----------
		m_e = Measured electric time series (including x and y components)

		-----------------------------------------------------------------
    """

    # Build input file names for the electric field time series
    x_input = str(e_path) + str(e_site) + "Ex.txt"
    y_input = str(e_path) + str(e_site) + "Ey.txt"

    # Read electric time series
    f = open(x_input, 'r')
    ex = np.loadtxt(f)
    f.close()

    f = open(y_input, 'r')
    ey = np.loadtxt(f)
    f.close()

    # Select periods of interest (frequency domain)
    perd, ex_fft, ey_fft = scr_fft(ex, ey, samp)

    factor = np.ones(perd.shape[0])
    for i, v in enumerate(perd):
        if (v < low) or (v > hi):
            factor[i] = 0

    ex_cfft = (factor * ex_fft)
    ey_cfft = (factor * ey_fft)
    ex_m = np.array(ifft(ex_cfft + np.conj(np.roll(ex_cfft[::-1], 1))).real)
    ey_m = np.array(ifft(ey_cfft + np.conj(np.roll(ey_cfft[::-1], 1))).real)

    # Create vector with electric time series
    m_e = np.array([ex_m, ey_m])

    return (m_e.T)
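# The helper scr_fft used above (and again in read_magnetics and
# compute_regional_b_field below) is not included in this listing. A minimal
# sketch of what it presumably returns, judging from how compute_e_fields_secs
# builds the same period axis by hand:
import numpy as np

def scr_fft(cx, cy, samp):
    """Hypothetical reconstruction: FFTs of two components plus a period axis."""
    freq = np.fft.fftfreq(cx.shape[0], d=samp)
    freq[freq == 0] = 1e-99      # avoid 1/0 at the DC bin
    perd = freq ** -1            # periods in seconds (negative for negative frequencies)
    return perd, np.fft.fft(cx), np.fft.fft(cy)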
Example 2
def expBroaden(y, t, expMod):
    """Apply exponential broadening to y by convolution in the Fourier domain.

    Assumes N is NumPy and F is an FFT module (e.g. numpy.fft), as in the
    other snippets in this collection.
    """
    fy = F.fft(y)
    a = N.exp(-1 * expMod * N.arange(0, len(y)) / t)  # exponential decay kernel
    fa = F.fft(a)
    fy1 = fy * fa                                     # product of transforms = convolution
    yb = (F.ifft(fy1).real) / N.sum(a)                # back to the original domain, normalised by the kernel area
    return yb
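# A minimal usage sketch (an illustration, not part of the original snippet):
# broadening a unit impulse simply returns the normalised decay kernel, so the
# output is positive, decaying, and sums to the same total as the input.
import numpy as N
import numpy.fft as F

y = N.zeros(256)
y[0] = 1.0                             # unit impulse
yb = expBroaden(y, t=32.0, expMod=1.0)
print(yb[:5], yb.sum())                # decaying values; the sum stays ~1.0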
Example 3
def chebfft(v):
    '''Chebyshev differentiation via fft.
       Ref.: Trefethen's 'Spectral Methods in MATLAB' book.
    '''
    N = len(v) - 1
    if N == 0:
        w = 0.0  # only when N is even!
        return w
    x = cos(pi * arange(0, N + 1) / N)
    ii = arange(0, N)
    V = flipud(v[1:N])
    V = list(v) + list(V)
    U = real(fft(V))
    b = list(ii)
    b.append(0)
    b = b + list(arange(1 - N, 0))
    w_hat = 1j * array(b)
    w_hat = w_hat * U
    W = real(ifft(w_hat))
    w = zeros(N + 1)
    w[1:N] = -W[1:N] / sqrt(1 - x[1:N]**2)
    w[0] = sum(ii**2 * U[ii]) / N + 0.5 * N * U[N]
    w[N] = sum((-1)**(ii+1)*ii**2*U[ii])/N + \
              0.5*(-1)**(N+1)*N*U[N]
    return w
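# A quick check of chebfft (an illustrative sketch; it assumes the NumPy names
# used inside the function -- cos, pi, arange, flipud, real, zeros, sqrt,
# array -- are in scope, e.g. via `from numpy import *`): differentiating
# exp(x) sampled at the Chebyshev points should reproduce exp(x) almost
# exactly for modest N.
from numpy import *
from numpy.fft import fft, ifft

n_cheb = 16
xc = cos(pi * arange(0, n_cheb + 1) / n_cheb)   # Chebyshev points on [-1, 1]
w = chebfft(exp(xc))                            # spectral derivative of exp(x)
print(abs(w - exp(xc)).max())                   # expected to be near roundoff level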
Example 4
def ichebt2(c):
    """inverse chebyshev transformation, values of function in Chebyshev
    nodes of the second kind, see chebfun for details"""
    n = len(c)
    oncircle = concatenate(([c[-1]],c[-2:0:-1]/2, c[0:-1]/2));
    v = real(ifft(oncircle));
    f = (n-1)*concatenate(([2*v[0]], v[1:n-1]+v[-1:n-1:-1], [2*v[n-1]] ))
    return f
Example 6
def hilbert(mag):
    """Compute the modified 1D discrete Hilbert transform

    Parameters
    ----------
    mag : ndarray
        The magnitude spectrum. Should be 1D with an even length, and
        preferably a fast length for FFT/IFFT.
    """
    # Adapted based on code by Niranjan Damera-Venkata,
    # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
    sig = np.zeros(len(mag))
    # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
    midpt = len(mag) // 2
    sig[1:midpt] = 1
    sig[midpt+1:] = -1
    # eventually if we want to support complex filters, we will need a
    # np.abs() on the mag inside the log, and should remove the .real
    recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
    return recon
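# A small sanity check (a sketch; the function expects np, fft and ifft in
# scope, as in the `minimum_phase` code it references): feed it the magnitude
# response of a short linear-phase FIR filter and the homomorphic
# reconstruction keeps that magnitude spectrum.
import numpy as np
from numpy.fft import fft, ifft

n = 64
h_lin = np.array([1.0, 2.0, 3.0, 2.0, 1.0]) / 9.0    # linear-phase smoothing filter
mag = np.abs(fft(h_lin, n))                           # even-length magnitude spectrum
mag = np.maximum(mag, 1e-8)                           # keep log(mag) finite

h_rec = hilbert(mag)
print(np.allclose(np.abs(fft(h_rec)), mag, atol=1e-6))   # magnitude is preserved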
Example 7
def dofft(iq, reverse=False):
    if reverse:
        # iq_ifft = np.fft.ifftshift(fftpack.ifft(iq))
        iq_ifft = fftpack.ifft(iq)
        return iq_ifft
    else:
        # iq_fft = np.fft.fftshift(fftpack.fft(iq))  # fft and shift axis
        iq_fft = fftpack.fft(iq)  # fft only; the fftshift variant is commented out above
        # iq_fft = fftpack.fft(iq)  # fft no shift axis
        # iq_fft = 20 * np.log10(abs((iq_fft + 1e-15) / N))  # convert to decibels, adjust power

        # adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
        return iq_fft
Example 8
def ichebt1(c):
    #TODO -- left unfinished in the source: v(1) below is MATLAB-style indexing
    """inverse chebyshev transformation, see chebfun"""
    n = len(c)
    print "tam===", n
    oncircle = concatenate((c[-1::-1], c[1:-1]));
    print "v=", oncircle, n
    v = real(ifft(oncircle));
    print v
    print v[-2:n:-1]
    print "|", v[1:-1]
    f = (n-1)*concatenate(([2*v(1)], v[-2:n:-1]+v[1:-1], 2*v[-1]));
    print "|", f
    return f
Example 9
def ichebt1(c):
    #TODO -- left unfinished in the source: v(1) below is MATLAB-style indexing
    """inverse chebyshev transformation, see chebfun"""
    n = len(c)
    print("tam===", n)
    oncircle = concatenate((c[-1::-1], c[1:-1]));
    print("v=", oncircle, n)
    v = real(ifft(oncircle));
    print(v)
    print(v[-2:n:-1])
    print("|", v[1:-1])
    f = (n-1)*concatenate(([2*v(1)], v[-2:n:-1]+v[1:-1], 2*v[-1]));
    print("|", f)
    return f
Example 10
def chebfft(v):
    '''Chebyshev differentiation via fft.
       Ref.: Trefethen's 'Spectral Methods in MATLAB' book.
    '''
    N = len(v)-1
    if N == 0:
        w = 0.0 # only when N is even!
        return w
    x  = cos(pi*arange(0,N+1)/N)
    ii = arange(0,N)
    V = flipud(v[1:N]); V = list(v) + list(V);
    U = real(fft(V))
    b = list(ii); b.append(0); b = b + list(arange(1-N,0));
    w_hat = 1j*array(b)
    w_hat = w_hat * U
    W = real(ifft(w_hat))
    w = zeros(N+1)
    w[1:N] = -W[1:N]/sqrt(1-x[1:N]**2)
    w[0] = sum(ii**2*U[ii])/N + 0.5*N*U[N]
    w[N] = sum((-1)**(ii+1)*ii**2*U[ii])/N + \
              0.5*(-1)**(N+1)*N*U[N]
    return w
Example 11
    t_fft = time.clock() - t0
    print 'Multiply vectors...'
    t0 = time.clock()
    tA = tA**M[i]  #  % O(n)
    #################
    ptA = ptA * tA  #  % O(n)#this is where it is messing UP
    #################
    t1 = time.clock()
    t_mult = t1 - t0

print 'Time for FFT: %4.2f s' % t_fft
print 'Time for multiplications: %4.2f s' % t_mult

print 'Calculate IFFT...'
t0 = time.clock()
ptA = F.ifft(ptA).real  #;  % O(nlogn)

print 'Time for IFFT: %4.2f s' % (time.clock() - t0)

print 'Plotting...'
t0 = time.clock()

start = (FOLDED * (WINDOW_SIZE - 1) + 1) * RESOLUTION + MASS_REMOVED, (
    FOLDED + 1) * (WINDOW_SIZE - 1) * RESOLUTION + MASS_REMOVED
stop = WINDOW_SIZE - 1

MA = N.linspace((FOLDED * (WINDOW_SIZE - 1) + 1) * RESOLUTION + MASS_REMOVED,
                (FOLDED + 1) * (WINDOW_SIZE - 1) * RESOLUTION + MASS_REMOVED,
                WINDOW_SIZE - 1)

ind = N.where(ptA > CUTOFF)[0]
def isotopefn(givenmass):
    def next2pow(x):
        return 2**int(N.ceil(N.log(float(x))/N.log(2.0)))


    MAX_ELEMENTS=5+1  # add 1 due to mass correction 'element'
    MAX_ISOTOPES=4    # maximum # of isotopes for one element
    CUTOFF=1e-8       # relative intensity cutoff for plotting

    WINDOW_SIZE = 500
    #WINDOW_SIZE=input('Window size (in Da) ---> ');

    #RESOLUTION=input('Resolution (in Da) ----> ');  % mass unit used in vectors
    RESOLUTION = 0.5
    if RESOLUTION < 0.00001:#  % minimal mass step allowed
      RESOLUTION = 0.00001
    elif RESOLUTION > 0.5:  # maximal mass step allowed
      RESOLUTION = 0.5

    R=0.00001/RESOLUTION#  % R is used to scale nuclide masses (see below)

    WINDOW_SIZE=WINDOW_SIZE/RESOLUTION;   # convert window size to new mass units
    WINDOW_SIZE=next2pow(WINDOW_SIZE);  # fast radix-2 fast-Fourier transform algorithm

    if WINDOW_SIZE < N.round(496708*R)+1:
      WINDOW_SIZE = next2pow(N.round(496708*R)+1)  # just to make sure window is big enough

    #print 'Vector size: 1x%d'%WINDOW_SIZE

    #H378 C254 N65 O75 S6
    resnumber=N.round(float(givenmass)/110,0)
    #print resnumber
    abuns=[7.593,4.869,1.351,1.492,0.051]
    myarr=[N.round(resnumber*k,0) for k in abuns]
    #myarr=[98,63,18,13,1]
    myarr.append(0)
    #print myarr
    M=N.array(myarr) #% empiric formula, e.g. bovine insulin

    # isotopic abundances stored in matrix A (one row for each element)
    A=N.zeros((MAX_ELEMENTS,MAX_ISOTOPES,2));

    A[0][0,:] = [100783,0.9998443]#                 % 1H
    A[0][1,:] = [201410,0.0001557]#                 % 2H
    A[1][0,:] = [100000,0.98889]#                   % 12C
    A[1][1,:] = [200336,0.01111]#                   % 13C
    A[2][0,:] = [100307,0.99634]#                   % 14N
    A[2][1,:] = [200011,0.00366]#                   % 15N
    A[3][0,:] = [99492,0.997628]#                  % 16O
    A[3][1,:] = [199913,0.000372]#                  % 17O
    A[3][2,:] = [299916,0.002000]#                  % 18O
    A[4][0,:] = [97207,0.95018]#                   % 32S
    A[4][1,:] = [197146,0.00750]#                   % 33S
    A[4][2,:] = [296787,0.04215]#                   % 34S
    A[4][3,:] = [496708,0.00017]#                   % 36S
    A[5][0,:] = [100000,1.00000]#                   % for shifting mass so that Mmi is
    #                                             % near left limit of window

    Mmi=N.array([N.round(100783*R), N.round(100000*R),\
                 N.round(100307*R), N.round(99492*R), N.round(97207*R), 0])*M#  % (Virtual) monoisotopic mass in new units
    Mmi = Mmi.sum()
    #% mass shift so Mmi is in left limit of window:
    #print "Mmi",Mmi
    #print "Window", WINDOW_SIZE
    FOLDED=N.floor(Mmi/(WINDOW_SIZE-1))+1#  % folded FOLDED times (always one folding due to shift below)

    #% shift distribution to 1 Da from lower window limit:
    M[MAX_ELEMENTS-1]=N.ceil(((WINDOW_SIZE-1)-N.mod(Mmi,WINDOW_SIZE-1)+N.round(100000*R))*RESOLUTION)
    MASS_REMOVED=N.array([0,11,13,15,31,-1])*M#';  % correction for 'virtual' elements and mass shift
    MASS_REMOVED = MASS_REMOVED.sum()

    ptA=N.ones(WINDOW_SIZE);
    t_fft=0
    t_mult=0

    for i in xrange(MAX_ELEMENTS):

        tA=N.zeros(WINDOW_SIZE)
        for j in xrange(MAX_ISOTOPES):
            if A[i][j,0] != 0:
                #removed +1 after R)+1 --we're using python
                tA[int(N.round(A[i][j,0]*R))]=A[i][j,1]  # put isotopic distribution in tA (integer index required by newer NumPy)

        #print 'Calculate FFT...'
        
        tA=F.fft(tA) # FFT along elements isotopic distribution  O(nlogn)
        
        #print 'Multiply vectors...'
        
        tA=tA**M[i]#  % O(n)
        #################
        ptA = ptA*tA#  % O(n)#this is where it is messing UP
        #################
        
        


    #rint 'Time for FFT: %4.2f s'%t_fft
    #print 'Time for multiplications: %4.2f s'%t_mult

    #print 'Calculate IFFT...'
    
    ptA=F.ifft(ptA).real#;  % O(nlogn)

    #print 'Time for IFFT: %4.2f s'%(time.clock()-t0)

    #print 'Plotting...'
    


    start = (FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED
    stop = WINDOW_SIZE - 1

    MA=N.linspace((FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED, WINDOW_SIZE-1)

    ind=N.where(ptA>CUTOFF)[0]


    y = ptA[ind]
    y2=[x/y[0] for x in y]
    return y2
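# Hypothetical usage (Python 2 era: the function uses xrange; N and F are
# assumed to be NumPy and an FFT module, as in the neighbouring snippets).
# The function guesses an elemental composition from the mass (~one residue
# per 110 Da) and returns intensities relative to the first retained peak.
import numpy as N
import numpy.fft as F

rel = isotopefn(11000.0)
print(len(rel), rel[:5])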
Example 13
def isotopefn(sequence, ion_number):

    massDict = mass.Composition(sequence[:ion_number])

    massDict['C'] -= 1
    massDict['H'] -= 2
    massDict['O'] -= 2

    if 'S' not in massDict:
        massDict['S'] = 0

    def next2pow(x):
        return 2**int(np.ceil(np.log(float(x)) / np.log(2.0)))

    MAX_ELEMENTS = 5 + 1  # add 1 due to mass correction 'element'
    MAX_ISOTOPES = 4  # maximum # of isotopes for one element
    CUTOFF = 1e-8  # relative intensity cutoff for plotting

    WINDOW_SIZE = 500
    # WINDOW_SIZE=input('Window size (in Da) ---> ');

    # RESOLUTION=input('Resolution (in Da) ----> ');  % mass unit used in vectors
    RESOLUTION = 0.5
    if RESOLUTION < 0.00001:  # % minimal mass step allowed
        RESOLUTION = 0.00001
    elif RESOLUTION > 0.5:  # maximal mass step allowed
        RESOLUTION = 0.5

    R = 0.00001 / RESOLUTION  # % R is used to scale nuclide masses (see below)

    WINDOW_SIZE = WINDOW_SIZE / RESOLUTION
    # convert window size to new mass units
    WINDOW_SIZE = next2pow(WINDOW_SIZE)
    # fast radix-2 fast-Fourier transform algorithm

    if WINDOW_SIZE < np.round(496708 * R) + 1:
        WINDOW_SIZE = next2pow(np.round(496708 * R) +
                               1)  # just to make sure window is big enough

    # print 'Vector size: 1x%d'%WINDOW_SIZE

    # H378 C254 N65 O75 S6
    M = np.array([
        massDict['H'], massDict['C'], massDict['N'], massDict['O'],
        massDict['S'], 1
    ])

    # isotopic abundances stored in matrix A (one row for each element)
    A = np.zeros((MAX_ELEMENTS, MAX_ISOTOPES, 2))

    A[0][0, :] = [100783, 0.9998443]  # % 1H
    A[0][1, :] = [201410, 0.0001557]  # % 2H
    A[1][0, :] = [100000, 0.98889]  # % 12C
    A[1][1, :] = [200336, 0.01111]  # % 13C
    A[2][0, :] = [100307, 0.99634]  # % 14N
    A[2][1, :] = [200011, 0.00366]  # % 15N
    A[3][0, :] = [99492, 0.997628]  # % 16O
    A[3][1, :] = [199913, 0.000372]  # % 17O
    A[3][2, :] = [299916, 0.002000]  # % 18O
    A[4][0, :] = [97207, 0.95018]  # % 32S
    A[4][1, :] = [197146, 0.00750]  # % 33S
    A[4][2, :] = [296787, 0.04215]  # % 34S
    A[4][3, :] = [496708, 0.00017]  # % 36S
    A[5][0, :] = [100000, 1.00000]  # % for shifting mass so that Mmi is
    #                                             % near left limit of window

    Mmi = np.array([np.round(100783 * R), np.round(100000 * R), \
                    np.round(100307 * R), np.round(99492 * R), np.round(97207 * R),
                    0]) * M  # % (Virtual) monoisotopic mass in new units
    Mmi = Mmi.sum()
    # % mass shift so Mmi is in left limit of window:
    # print "Mmi",Mmi
    # print "Window", WINDOW_SIZE
    FOLDED = np.floor(
        Mmi / (WINDOW_SIZE - 1)
    ) + 1  # % folded FOLDED times (always one folding due to shift below)

    # % shift distribution to 1 Da from lower window limit:
    M[MAX_ELEMENTS - 1] = np.ceil(
        ((WINDOW_SIZE - 1) - np.mod(Mmi, WINDOW_SIZE - 1) +
         np.round(100000 * R)) * RESOLUTION)
    MASS_REMOVED = np.array([
        0, 11, 13, 15, 31, -1
    ]) * M  # ';  % correction for 'virtual' elements and mass shift
    MASS_REMOVED = MASS_REMOVED.sum()

    ptA = np.ones(WINDOW_SIZE)
    t_fft = 0
    t_mult = 0

    for i in xrange(MAX_ELEMENTS):

        tA = np.zeros(WINDOW_SIZE)
        for j in xrange(MAX_ISOTOPES):
            if A[i][j, 0] != 0:
                # removed +1 after R)+1 --we're using python
                tA[int(np.round(
                    A[i][j, 0] *
                    R))] = A[i][j, 1]  # put isotopic distribution in tA (builtin int; np.int no longer exists in NumPy)

        # print 'Calculate FFT...'

        tA = F.fft(tA)  # FFT along elements isotopic distribution  O(nlogn)

        # print 'Multiply vectors...'

        tA = tA**M[i]  # % O(n)
        #################
        ptA = ptA * tA  # % O(n)#this is where it is messing UP
        #################

    # rint 'Time for FFT: %4.2f s'%t_fft
    # print 'Time for multiplications: %4.2f s'%t_mult

    # print 'Calculate IFFT...'

    ptA = F.ifft(ptA).real  # ;  % O(nlogn)

    # print 'Time for IFFT: %4.2f s'%(time.clock()-t0)

    # print 'Plotting...'

    start = (FOLDED * (WINDOW_SIZE - 1) + 1) * RESOLUTION + MASS_REMOVED, (
        FOLDED + 1) * (WINDOW_SIZE - 1) * RESOLUTION + MASS_REMOVED
    stop = WINDOW_SIZE - 1

    MA = np.linspace(
        (FOLDED * (WINDOW_SIZE - 1) + 1) * RESOLUTION + MASS_REMOVED,
        (FOLDED + 1) * (WINDOW_SIZE - 1) * RESOLUTION + MASS_REMOVED,
        WINDOW_SIZE - 1)

    ind = np.where(ptA > CUTOFF)[0]

    y = ptA[ind]
    y2 = y / np.max(y)
    return y2
Example 14
import numpy as N
import numpy.fft.fftpack as F  # older NumPy path used by these snippets; numpy.fft on recent versions
import pylab as P


MAX_ELEMENTS = 5
MAX_MASS = 2**13      #% fast radix-2 fast-Fourier transform algorithm is used

M = N.array([378,234,65,75,6])               #% empirical formula, e.g. bovine insulin

A = N.zeros((MAX_ELEMENTS,MAX_MASS))#                 % isotopic abundancies stored in A

A[0,1:3]=[0.9998443,0.0001557]#                 % H
A[1,12:14]=[0.98889,0.01111]#                   % C
A[2,14:16]=[0.99634,0.00366]#                   % N
A[3,16:19]=[0.997628,0.000372,0.002000]#        % O
A[4,32:37]=[0.95018,0.00750,0.04215,0,0.00017]# % S (extend to other elements as needed)

tA=F.fft(A,axis=1)#                     % FFT along each element's isotopic distribution

ptA=N.ones(MAX_MASS);
for i in xrange(MAX_ELEMENTS-1):
    ptA = ptA*(tA[i,:]**M[i])#;         % multiply transforms (elementwise)


riptA=F.ifft(ptA).real#              % inverse FFT to get convolutions

id=N.zeros(MAX_MASS)
id[0:MAX_MASS-1]=riptA[1:MAX_MASS]#; % shift to real mass

print id.argmax(), id.max()
P.plot(riptA)
P.show()
Example 15
    def cal_isotopic(self):
        MAX_ELEMENTS=5+1  # add 1 due to mass correction 'element'
        MAX_ISOTOPES=4    # maximum # of isotopes for one element
        CUTOFF=1e-4       # relative intensity cutoff for plotting
        
        WINDOW_SIZE = 500
        #WINDOW_SIZE=input('Window size (in Da) ---> ');
        
        #RESOLUTION=input('Resolution (in Da) ----> ');  % mass unit used in vectors
        RESOLUTION = 1
        if RESOLUTION < 0.00001:#  % minimal mass step allowed
          RESOLUTION = 0.00001
        elif RESOLUTION > 0.5:  # maximal mass step allowed
          RESOLUTION = 0.5
        
        R=0.00001/RESOLUTION#  % R is used to scale nuclide masses (see below)
        
        WINDOW_SIZE=WINDOW_SIZE/RESOLUTION; 
        self.xx=WINDOW_SIZE  # convert window size to new mass units
        WINDOW_SIZE=self.next2pow();  # fast radix-2 fast-Fourier transform algorithm
        
        if WINDOW_SIZE < np.round(496708*R)+1:
          WINDOW_SIZE = self.next2pow(np.round(496708*R)+1)  # just to make sure window is big enough
        
        
        #H378 C254 N65 O75 S6
        M=np.array([int(self.H),int(self.C),int(self.N),int(self.O),0,0]) #% empiric formula, e.g. bovine insulin
        
        # isotopic abundances stored in matrix A (one row for each element)
        A=np.zeros((MAX_ELEMENTS,MAX_ISOTOPES,2));
        
        A[0][0,:] = [100783,0.9998443]#                 % 1H
        A[0][1,:] = [201410,0.0001557]#                 % 2H
        A[1][0,:] = [100000,0.98889]#                   % 12C
        A[1][1,:] = [200336,0.01111]#                   % 13C
        A[2][0,:] = [100307,0.99634]#                   % 14N
        A[2][1,:] = [200011,0.00366]#                   % 15N
        A[3][0,:] = [99492,0.997628]#                  % 16O
        A[3][1,:] = [199913,0.000372]#                  % 17O
        A[3][2,:] = [299916,0.002000]#                  % 18O
        A[4][0,:] = [97207,0.95018]#                   % 32S
        A[4][1,:] = [197146,0.00750]#                   % 33S
        A[4][2,:] = [296787,0.04215]#                   % 34S
        A[4][3,:] = [496708,0.00017]#                   % 36S
        A[5][0,:] = [100000,1.00000]#                   % for shifting mass so that Mmi is
        #                                             % near left limit of window
        
        Mmi=np.array([np.round(100783*R), np.round(100000*R),\
                     np.round(100307*R),np.round(99492*R), np.round(97207*R), 0])*M#  % (Virtual) monoisotopic mass in new units
        Mmi = Mmi.sum()
        #% mass shift so Mmi is in left limit of window:
        FOLDED=np.floor(Mmi/(WINDOW_SIZE-1))+1#  % folded FOLDED times (always one folding due to shift below)
        #% shift distribution to 1 Da from lower window limit:
        M[MAX_ELEMENTS-1]=np.ceil(((WINDOW_SIZE-1)-np.mod(Mmi,WINDOW_SIZE-1)+np.round(100000*R))*RESOLUTION)
        
        MASS_REMOVED=np.array([0,11,13,15,31,-1])*M#% correction for 'virtual' elements and mass shift
        begin=WINDOW_SIZE*RESOLUTION+MASS_REMOVED.sum()
        end=2*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED.sum()
        
        ptA=np.ones(WINDOW_SIZE);
        t_fft=0
        t_mult=0
        
        for i in xrange(MAX_ELEMENTS):
        
            tA=np.zeros(WINDOW_SIZE)
            for j in xrange(MAX_ISOTOPES):
                if A[i][j,0] != 0:
                    #removed +1 after R)+1 --we're using python
                    tA[int(np.round(A[i][j,0]*R))]=A[i][j,1]  # put isotopic distribution in tA (integer index required by newer NumPy)
            t0 = time.clock()
            tA=F.fft(tA) # FFT along elements isotopic distribution  O(nlogn)
            t_fft = time.clock()-t0
            t0 = time.clock()
            tA=tA**M[i]#  % O(n)
            #################
            ptA = ptA*tA#  % O(n)#this is where it is messing UP
            #################
            t1 = time.clock()
            t_mult=t1-t0
        

        t0=time.clock()
        ptA=F.ifft(ptA).real#;  % O(nlogn)

        t0=time.clock()
        
        MA=np.linspace(begin,end,WINDOW_SIZE-1)
        ind=np.where(ptA>CUTOFF)[0]
        
        self.x = MA[ind]
        self.y = ptA[ind]
        self.x_min=int(np.min(self.x)-(np.max(self.x)-np.min(self.x)))
        self.x_max=int(np.min(self.x)+(np.max(self.x)-np.min(self.x)))
        
        self.mass_y=np.ones(len(self.x))
        mass_diff=np.ones(len(self.x))
        
        mzInd= np.logical_and((self.mz>=self.x_min),(self.mz<=self.x_max))
        self.mass_y=self.mass[mzInd]
        self.mass_x=self.mz[mzInd]
        
        
#         for i in range(len(self.x)):
#             self.mass_y[i]=self.mass[int(self.x[i])]
        self.massy=np.max(self.mass_y)
        print self.massy
        self.mass_y=self.mass_y/max(self.mass_y)*100
        self.y=self.y/max(self.y)*100
#        k=(self.mass_y*self.y).sum()/(self.mass_y*self.mass_y).sum()
#        self.fit=((k*self.mass_y-self.y)*(k*self.mass_y-self.y)).sum()/(self.y*self.y).sum()
        for i in range(len(self.y)):
            mass_diff[i]=np.abs(self.mass_y[i]-self.y[i])/(self.mass_y[i]+self.y[i])
        self.mass_diff=(1-mass_diff.sum()/len(mass_diff))*100
Example 16
def isotope(fVec,DAbund):
    '''
    %
    % Calculates isotopic distributions including isotopic fine structure
    % of molecules using FFT and various scaling 'tricks'. Easily adopted
    % to molecules of any elemental composition (by altering MAX_ELEMENTS
    % and the nuclide matrix A). To simulate spectra, convolute with peak
    % shape using FFT.
    %
    % (C) 1999 by Magnus Palmblad, Division of Ion Physics, Uppsala Univ.
    % Acknowledgements:
    % Lars Larsson-Cohn, Dept. of Mathematical Statistics, Uppsala Univ.,
    % for help on theory of convolutions and FFT.
    % Jan Axelsson, Div. of Ion Physics, Uppsala Univ. for comments and ideas
    %
    % Contact Magnus Palmblad at [email protected] if you should
    % have any questions or comments.
    %

    Converted to Python 1/10/08 by
    Brian H. Clowers [email protected]

    October 31, 2014
    Added Phosphorous and chemical formula parsing
    Added conditional specification of stable isotope composition
    Ben Bowen, [email protected]

    fVec is a vector representing the chemical formula including deuterium
    # [H, C, N, O, S, P, D]
    DAbund is the amount of deuterium [0-1], 0.05 is typical
    '''
    import numpy as np
    import numpy.fft.fftpack as F
    # import time
    # import pylab as P


    def next2pow(x):
        return 2**int(np.ceil(np.log(float(x))/np.log(2.0)))

    scaleFactor = 100000
    MAX_ELEMENTS=7+1  # add 1 due to mass correction 'element'
    MAX_ISOTOPES=4    # maximum # of isotopes for one element
    CUTOFF=1e-4     # relative intensity cutoff for plotting

    WINDOW_SIZE = 500
    #WINDOW_SIZE=input('Window size (in Da) ---> ');

    #RESOLUTION=input('Resolution (in Da) ----> ');  % mass unit used in vectors
    RESOLUTION = 0.5
    if RESOLUTION < 0.00001:#  % minimal mass step allowed
      RESOLUTION = 0.00001
    elif RESOLUTION > 0.5:  # maximal mass step allowed
      RESOLUTION = 0.5

    R=0.00001/RESOLUTION#  % R is used to scale nuclide masses (see below)

    WINDOW_SIZE=WINDOW_SIZE/RESOLUTION;   # convert window size to new mass units
    WINDOW_SIZE=next2pow(WINDOW_SIZE);  # fast radix-2 fast-Fourier transform algorithm

    if WINDOW_SIZE < np.round(496708*R)+1:
      WINDOW_SIZE = next2pow(np.round(496708*R)+1)  # just to make sure window is big enough

    # print 'Vector size: 1x%d'%WINDOW_SIZE

    #H378 C254 N65 O75 S6
    
    # M=np.array([378,254,65,75,6,0]) #% empiric formula, e.g. bovine insulin
    M=np.array(fVec) #% empiric formula, e.g. bovine insulin

    # isotopic abundances stored in matrix A (one row for each element)
    A=np.zeros((MAX_ELEMENTS,MAX_ISOTOPES,2));

    A[0][0,:] = [100783,0.9998443]#                 % 1H
    A[0][1,:] = [201410,0.0001557]#                 % 2H
    A[1][0,:] = [100000,0.98889]#                   % 12C
    A[1][1,:] = [200336,0.01111]#                   % 13C
    A[2][0,:] = [100307,0.99634]#                   % 14N
    A[2][1,:] = [200011,0.00366]#                   % 15N
    A[3][0,:] = [99492,0.997628]#                  % 16O
    A[3][1,:] = [199913,0.000372]#                  % 17O
    A[3][2,:] = [299916,0.002000]#                  % 18O
    A[4][0,:] = [97207,0.95018]#                   % 32S
    A[4][1,:] = [197146,0.00750]#                   % 33S
    A[4][2,:] = [296787,0.04215]#                   % 34S
    A[4][3,:] = [496708,0.00017]#                   % 36S
    A[5][0,:] = [97376,1.0]# Phosphorous
    A[6][0,:] = [100783,1.0-DAbund]#                 % 1H
    A[6][1,:] = [201410,DAbund]#                 % 2H
    A[7][0,:] = [100000,1.00000]#                   % for shifting mass so that Mmi is
    #                                             % near left limit of window
    mass_removed_vec = [0,11,13,15,31,30,0,-1]
    monoisotopic = 0.0
    for i,e in enumerate(fVec):
        monoisotopic = monoisotopic + ( (mass_removed_vec[i]*scaleFactor+A[i][0,0])*e / scaleFactor)

    Mmi=np.array([np.round(100783*R), np.round(100000*R),\
                 np.round(100307*R), np.round(99492*R), np.round(97207*R), np.round(97376*R), np.round(100783*R), 0])*M#  % (Virtual) monoisotopic mass in new units
    Mmi = Mmi.sum()
    #% mass shift so Mmi is in left limit of window:
    #print "Mmi",Mmi
    #print "Window", WINDOW_SIZE
    FOLDED=np.floor(Mmi/(WINDOW_SIZE-1))+1#  % folded FOLDED times (always one folding due to shift below)

    #% shift distribution to 1 Da from lower window limit:
    M[MAX_ELEMENTS-1]=np.ceil(((WINDOW_SIZE-1)-np.mod(Mmi,WINDOW_SIZE-1)+np.round(100000*R))*RESOLUTION)
    MASS_REMOVED=np.array(mass_removed_vec)*M#';  % correction for 'virtual' elements and mass shift
    MASS_REMOVED = MASS_REMOVED.sum()

    ptA=np.ones(WINDOW_SIZE);
    t_fft=0
    t_mult=0

    for i in xrange(MAX_ELEMENTS):
        tA=np.zeros(WINDOW_SIZE)
        for j in xrange(MAX_ISOTOPES):
            if A[i][j,0] != 0:
                tA[int(np.round(A[i][j,0]*R))]=A[i][j,1]  # put isotopic distribution in tA (integer index required by newer NumPy)

        tA=F.fft(tA) # FFT along elements isotopic distribution  O(nlogn)
        tA=tA**M[i]#  % O(n)
        ptA = ptA*tA#  % O(n)#this is where it is messing UP

    ptA=F.ifft(ptA).real#;  % O(nlogn)

    start = (FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED
    stop = WINDOW_SIZE - 1

    MA=np.linspace((FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED, WINDOW_SIZE-1)

    ind=np.where(ptA>CUTOFF)[0]

    x = MA[ind]
    y = ptA[ind]

    for i,xi in enumerate(x):
        x[i] = monoisotopic + (i*1.003355)


    return x,y,monoisotopic
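# Hypothetical usage, run under the Python 2 / older NumPy environment the
# snippet assumes (xrange, numpy.fft.fftpack). The formula vector follows the
# docstring order [H, C, N, O, S, P, D]; note the code also indexes an eighth,
# mass-correction entry, so a trailing 0 is appended here.
fVec = [80, 50, 14, 14, 1, 0, 0, 0]      # roughly C50H80N14O14S, no P, no D sites
x, y, mono = isotope(fVec, 0.0)          # DAbund = 0: no deuterium enrichment
print(mono)                              # estimated monoisotopic mass
print(list(zip(x, y))[:3])               # first few (mass, relative intensity) pairs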
Example 17
def Proj_F1(s):
    # Proj_M1 is assumed to be defined elsewhere in the source project; it is
    # applied to the zero-padded spectrum of s before transforming back.
    N = len(s)
    return ifft(Proj_M1(fft(s, 2 * N)))[0:N]
Example 18
# The NumPy names used below (linspace, arcsin, sqrt, pi, exp, outer, ifft, np)
# are assumed to come from NumPy; the snippet itself only imports fftshift.
import numpy as np
from numpy import arcsin, exp, linspace, outer, pi, sqrt
from numpy.fft import ifft
from numpy.fft.helper import fftshift
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rcParams

# Raw string so the backslashes in the Windows path are not treated as escapes
rcParams['animation.convert_path'] = r"C:\Program Files\ImageMagick-6.9.3\convert.exe"

nx = 5001
nfreq = 2048
x = linspace(-20, 20, nx)
omega = linspace(0, 4, nfreq)
kappa = 2 * arcsin(abs(omega / 2) + 0j)
ricker_amp = sqrt(pi) * 4 * omega**2 * exp(-omega**2)
TX = exp(1j * outer(ricker_amp * kappa, x))
X = ifft(TX, axis=0)
magX = np.real(X)


def update(k):
    ax.cla()
    plt.plot(x, magX[k, :])
    plt.ylim(-1, 2)


nframes = int(nfreq / 2)
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ani = animation.FuncAnimation(fig,
                              update,
                              frames=range(nframes),
                              blit=False,
                              repeat=False)
Example 19
def Proj_F1(s):
    N   = len(s)
    return ifft(Proj_M1(fft(s, 2*N)))[0:N]
def compute_e_fields_secs(sb, tf_path, e_site, samp, hi, low, error_bf, ef_tf,
                          e_nvpwa, stat):
    """ Compute E fields using magnetic time series from SECS
		
		Parameters
		-----------
         sb = magnetic time series from SECS
         tf_path = Folder with electromagnetic tensor relationships
         e_site = Name of the site to compute electric fields
         samp = Sampling rate (seconds)
         hi = Maximum period to analyse (seconds)
         low = Minimum period to analyse (seconds)
         error_bf = Error associated with the SECS interpolation approach
         ef_tf = Error floor for the MT and quasi-MT tensor relationships
         e_ncpwa =  Error floor for the Non-plane wave approximation
         stat = Statistics for error propagation

         
		Returns
		-----------
         tf_ex = electric time series x component (mean value)
         tf_ey = electric time series y component (mean value)
         std_ex = standard deviation of the electric time series x component
         std_ey = standard deviation of the electric time series y component
         
         -----------------------------------------------------------
	"""

    s_bx = sb[:, 0] * 1e-9  # convert from nT to T
    s_by = sb[:, 1] * 1e-9  # convert from nT to T

    # Detrend the time series
    s_bx = scipy.signal.detrend(s_bx)
    s_by = scipy.signal.detrend(s_by)

    # Tukey window to avoid instabilities at the edges of the time series
    window = scipy.signal.tukey(s_by.shape[0], 0.1)
    s_bx = window * s_bx
    s_by = window * s_by

    # get Frequencies / periods
    freqB = np.fft.fftfreq(s_bx.shape[0], d=samp)
    for i in range(0, len(freqB)):
        if freqB[i] == 0:
            freqB[i] = 1e-99

    perB = freqB**-1

    # Compute fft
    s_bx_fft = np.fft.fftpack.fft(s_bx)
    s_by_fft = np.fft.fftpack.fft(s_by)

    # Read tensors
    file_format = -1
    try:
        filename = (str(tf_path) + "E" + str(e_site) + "B" + str(e_site) +
                    "_s.j")

        print(filename)
        f = open(filename, 'r')
        data = f.readlines()
        f.close()
        print('j file')
        file_format = 0
    except:
        check = 0

    try:
        filename = (str(tf_path) + "E" + str(e_site) + "B" + str(e_site) +
                    "_s.edi")

        f = open(filename, 'r')
        data = f.readlines()
        f.close()
        print('edi file')
        print(filename)
        file_format = 1
    except:
        check = 1

    if file_format == 0:
        try:
            for index, line in enumerate(data):
                if line.startswith("ZXX"):
                    index_zxx = index
                if line.startswith("ZXY"):
                    index_zxy = index
                if line.startswith("ZYX"):
                    index_zyx = index
                if line.startswith("ZYY"):
                    index_zyy = index
                if line.startswith("TZX"):
                    index_tzx = index
                if line.startswith("TZY"):
                    index_tzy = index

            data_zxx = data[index_zxx + 2:index_zxy]
            zxx = np.loadtxt(data_zxx)
            data_zxy = data[index_zxy + 2:index_zyx]
            zxy = np.loadtxt(data_zxy)
            data_zyx = data[index_zyx + 2:index_zyy]
            zyx = np.loadtxt(data_zyx)
            data_zyy = data[index_zyy + 2:index_tzx]
            zyy = np.loadtxt(data_zyy)
            per_z = zxx[:, 0]

            zxx[:, 1:3] = (1) * zxx[:, 1:3]
            zxy[:, 1:3] = (1) * zxy[:, 1:3]
            zyx[:, 1:3] = (1) * zyx[:, 1:3]
            zyy[:, 1:3] = (1) * zyy[:, 1:3]

            print(per_z)
        except:
            check = 2

    if file_format == 1:
        try:
            for index, line in enumerate(data):
                line = line.strip(
                )  # remove leading and trailing whitespaces LJW 2019-04-15
                if 'NFREQ' in line[:5]:
                    for j in range(0, len(line)):
                        if line[j] == '=':
                            n_freq = int(line[j + 1::])

                if 'NPER' in line[:5]:
                    for j in range(0, len(line)):
                        if line[j] == '=':
                            n_freq = int(line[j + 1::])

                if '>FREQ' in line[:5]:
                    freq = read_variable(n_freq, index, data)

                if '>PERI' in line[:5]:
                    per = read_variable(n_freq, index, data)
                    freq = 1. / per

                if '>ZROT' in line[:5]:
                    zrot = read_variable(n_freq, index, data)

                if '>ZXXR' in line[:5]:
                    zxxr = read_variable(n_freq, index, data)

                if '>ZXXI' in line[:5]:
                    zxxi = read_variable(n_freq, index, data)

                if '>ZXX.V' in line[:6]:
                    zxxv = read_variable(n_freq, index, data)
                    zxxstd = np.sqrt(zxxv)

                if '>ZXYR' in line[:5]:
                    zxyr = read_variable(n_freq, index, data)

                if '>ZXYI' in line[:5]:
                    zxyi = read_variable(n_freq, index, data)

                if '>ZXY.V' in line[:6]:
                    zxyv = read_variable(n_freq, index, data)
                    zxystd = np.sqrt(zxyv)

                if '>ZYXR' in line[:5]:
                    zyxr = read_variable(n_freq, index, data)
                if '>ZYXI' in line[:5]:
                    zyxi = read_variable(n_freq, index, data)

                if '>ZYX.V' in line[:6]:
                    zyxv = read_variable(n_freq, index, data)
                    zyxstd = np.sqrt(zyxv)

                if '>ZYYR' in line[:5]:
                    zyyr = read_variable(n_freq, index, data)

                if '>ZYYI' in line[:5]:
                    zyyi = read_variable(n_freq, index, data)

                if '>ZYY.V' in line[:6]:
                    zyyv = read_variable(n_freq, index, data)
                    zyystd = np.sqrt(zyyv)
            try:
                periods = 1. / freq
            except:
                periods = per

            zxx = np.column_stack([periods, -1 * zxxr, -1 * zxxi, zxxstd])
            zxy = np.column_stack([periods, -1 * zxyr, -1 * zxyi, zxystd])
            zyx = np.column_stack([periods, -1 * zyxr, -1 * zyxi, zyxstd])
            zyy = np.column_stack([periods, -1 * zyyr, -1 * zyyi, zyystd])
            per_z = zxx[:, 0]

            if per_z[0] > per_z[1]:
                per_z = per_z[::-1]
                zxx = zxx[::-1]
                zxy = zxy[::-1]
                zyx = zyx[::-1]
                zyy = zyy[::-1]

        except:
            check = 3

    if file_format == -1:
        print('Cannot read the MT impedance tensor for site: ' + str(e_site))
        print('MT impedance tensor must be a .j or .edi file')

    # Select the periods of interest
    factor = np.ones(perB.shape[0])
    zxx_int = np.zeros([perB.shape[0], 3])
    zxy_int = np.zeros([perB.shape[0], 3])
    zyx_int = np.zeros([perB.shape[0], 3])
    zyy_int = np.zeros([perB.shape[0], 3])

    for i, v in enumerate(perB):
        if (v < low) or (v > hi):
            factor[i] = 0

    for i in range(0, 3):
        zxx_int[:, i] = np.interp(perB, per_z, zxx[:, i + 1]) * factor
        zxy_int[:, i] = np.interp(perB, per_z, zxy[:, i + 1]) * factor
        zyx_int[:, i] = np.interp(perB, per_z, zyx[:, i + 1]) * factor
        zyy_int[:, i] = np.interp(perB, per_z, zyy[:, i + 1]) * factor

    # Define variables
    ex_calc = np.zeros([s_bx.shape[0], stat])
    ey_calc = np.zeros([s_bx.shape[0], stat])

    # Define error floor
    zzz_det = np.sqrt(
        np.abs(((zxy_int[:, 0] + zxy_int[:, 1] * 1j) *
                (zyx_int[:, 0] + zyx_int[:, 1] * 1j)) -
               ((zxx_int[:, 0] + zxx_int[:, 1] * 1j) *
                (zyy_int[:, 0] + zyy_int[:, 1] * 1j))))

    for ik in range(0, len(zxx_int)):
        if zxx_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zxx_int[ik, 2] = zzz_det[ik] * ef_tf
        if zxy_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zxy_int[ik, 2] = zzz_det[ik] * ef_tf
        if zyx_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zyx_int[ik, 2] = zzz_det[ik] * ef_tf
        if zyy_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zyy_int[ik, 2] = zzz_det[ik] * ef_tf

    # Compute electric fields
    for i in range(0, stat):
        ex_1 = (
            (((zxx_int[:, 0] + np.random.standard_normal() * zxx_int[:, 2]) +
              (zxx_int[:, 1] + np.random.standard_normal() * zxx_int[:, 2]) *
              1j) * (s_bx_fft + s_bx_fft * np.random.standard_normal() *
                     (error_bf))) +
            (((zxy_int[:, 0] + np.random.standard_normal() * zxy_int[:, 2]) +
              (zxy_int[:, 1] + np.random.standard_normal() * zxy_int[:, 2]) *
              1j) * (s_by_fft + s_by_fft * np.random.standard_normal() *
                     (error_bf))))

        ey_1 = (
            (((zyx_int[:, 0] + np.random.standard_normal() * zyx_int[:, 2]) +
              (zyx_int[:, 1] + np.random.standard_normal() * zyx_int[:, 2]) *
              1j) * (s_bx_fft + s_bx_fft * np.random.standard_normal() *
                     (error_bf))) +
            (((zyy_int[:, 0] + np.random.standard_normal() * zyy_int[:, 2]) +
              (zyy_int[:, 1] + np.random.standard_normal() * zyy_int[:, 2]) *
              1j) * (s_by_fft + s_by_fft * np.random.standard_normal() *
                     (error_bf))))

        # Add error associated with the non-validity of the planewave approx.
        ex_1 = ex_1 + np.random.standard_normal() * ex_1 * e_nvpwa
        ey_1 = ey_1 + np.random.standard_normal() * ey_1 * e_nvpwa

        # Compute ifft
        ex_calc[:,
                i] = (np.real(ifft(ex_1 + np.conj(np.roll(ex_1[::-1], 1)))) *
                      (1000. / (4 * np.pi * 1e-7)))
        ey_calc[:,
                i] = (np.real(ifft(ey_1 + np.conj(np.roll(ey_1[::-1], 1)))) *
                      (1000. / (4 * np.pi * 1e-7)))

    # Define mean value
    mex = np.zeros(s_bx.shape[0])
    mey = np.zeros(s_bx.shape[0])

    for i in range(s_bx.shape[0]):
        mex[i] = ex_calc[i, :].mean()
        mey[i] = ey_calc[i, :].mean()

    # Calculate standard deviation (errorbars)
    ex_s = np.zeros(s_bx.shape[0])
    ey_s = np.zeros(s_bx.shape[0])

    for i in range(s_bx.shape[0]):
        ex_s[i] = ex_calc[i, :].std()
        ey_s[i] = ey_calc[i, :].std()

    # Define the outputs
    tf_ex = np.array(np.copy(mex))
    tf_ey = np.array(np.copy(mey))
    std_ex = np.array(np.copy(ex_s))
    std_ey = np.array(np.copy(ey_s))

    return (tf_ex, tf_ey, std_ex, std_ey)
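# All of these routines rebuild a real, band-limited time series from a
# one-sided spectrum with ifft(X + np.conj(np.roll(X[::-1], 1))).real.
# A small self-contained sketch of why that works: np.roll(X[::-1], 1)[k]
# equals X[-k], so adding its conjugate makes the spectrum Hermitian-symmetric
# and the inverse FFT purely real.
import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(0)
sig = rng.standard_normal(64)

X = fft(sig)
X_pos = X.copy()
X_pos[0] = 0.0                 # DC bin: its period falls outside any (low, hi) band
X_pos[len(X) // 2:] = 0.0      # Nyquist and all negative-frequency bins

S = X_pos + np.conj(np.roll(X_pos[::-1], 1))
rebuilt = ifft(S)

print(np.allclose(S, np.conj(np.roll(S[::-1], 1))))    # Hermitian symmetry holds
print(np.abs(rebuilt.imag).max() < 1e-12)              # hence a (numerically) real series
print(np.allclose(fft(rebuilt.real)[1:32], X[1:32]))   # in-band spectrum preserved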
def compute_regional_b_field(in_path, sites, reg_ref, mag_path, tf_path, samp,
                             hi, low, ef_h, stat):
    """ Calculate regional b field at the magnetic observatories using
        reference magnetic field and inter-station tensor relationships
		
		Parameters
		-----------
		in_path = Folder with input parameters
		sites = Name of the site
         reg_ref = Reference magnetic field (e.g. CLF in Campanya et al. 2018)
         mag_path = Folder with magnetic fields time series
         tf_path = Folder with electromagnetic tensor relationships
         samp = Sampling rate (seconds)
         hi = Maximum period to analyse (seconds)
         low = Minimum period to analyse (seconds)
         ef_h = Error floor for H tensor relationship
         stat = Statistics for error propagation
         
         
		Returns
		-----------
         c =  Computed magnetic fields
         std_c = Standard devaition for the computed magnetic fields
		-----------------------------------------------------------------
	"""

    # Check the magnetic observatories used in the experiment
    s, s_lat, s_lon = read_co(str(in_path) + str(sites))

    # Read the magnetic time series from Reference magnetic field
    x_input = str(mag_path) + str(reg_ref) + "Bx.txt"
    y_input = str(mag_path) + str(reg_ref) + "By.txt"

    f = open(x_input, 'r')
    reg_refbx = np.loadtxt(f)
    f.close()

    f = open(y_input, 'r')
    reg_refby = np.loadtxt(f)
    f.close()

    # Define variables
    c = np.zeros([len(reg_refbx), len(s), 2])
    std_c = np.zeros([len(reg_refbx), len(s), 2])

    # Detrend the time series
    reg_refbx = scipy.signal.detrend(reg_refbx)
    reg_refby = scipy.signal.detrend(reg_refby)

    # Compute fft
    perd, reg_refbx_fft, reg_refby_fft = scr_fft(reg_refbx, reg_refby, samp)

    # Calculate spectra at the sites of interest
    for ip in range(0, len(s)):
        mag = str(s[ip])

        # Read inter-station transfer functions
        filename = tf_path + "B" + str(mag) + "B" + str(reg_ref) + "_s.j"

        f = open(filename, 'r')
        data = f.readlines()
        f.close()

        # Read the components of the tensors
        for index, line in enumerate(data):
            if line.startswith('ZXX'):
                index_zxx = index
            if line.startswith("ZXY"):
                index_zxy = index
            if line.startswith("ZYX"):
                index_zyx = index
            if line.startswith("ZYY"):
                index_zyy = index
            if line.startswith("TZX"):
                index_tzx = index
            if line.startswith("TZY"):
                index_tzy = index

        data_zxx = data[index_zxx + 2:index_zxy]
        zxx = np.loadtxt(data_zxx)
        data_zxy = data[index_zxy + 2:index_zyx]
        zxy = np.loadtxt(data_zxy)
        data_zyx = data[index_zyx + 2:index_zyy]
        zyx = np.loadtxt(data_zyx)
        data_zyy = data[index_zyy + 2:index_tzx]
        zyy = np.loadtxt(data_zyy)
        per_z = zxx[:, 0]

        # Define variables
        factor = np.ones(perd.shape[0])
        zxx_int = np.zeros([perd.shape[0], 3])
        zxy_int = np.zeros([perd.shape[0], 3])
        zyx_int = np.zeros([perd.shape[0], 3])
        zyy_int = np.zeros([perd.shape[0], 3])

        # select periods of interest
        for i, v in enumerate(perd):
            if (v < low) or (v > hi):
                factor[i] = 0

        for i in range(0, 3):
            zxx_int[:, i] = np.interp(perd, per_z, zxx[:, i + 1]) * factor
            zxy_int[:, i] = np.interp(perd, per_z, zxy[:, i + 1]) * factor
            zyx_int[:, i] = np.interp(perd, per_z, zyx[:, i + 1]) * factor
            zyy_int[:, i] = np.interp(perd, per_z, zyy[:, i + 1]) * factor

        # Define variables
        bx_calc = np.zeros([reg_refbx.shape[0], stat])
        by_calc = np.zeros([reg_refby.shape[0], stat])

        for ik in range(0, len(zxx_int)):
            if zxx_int[ik, 2] <= ef_h:
                zxx_int[ik, 2] = ef_h
            if zxy_int[ik, 2] <= ef_h:
                zxy_int[ik, 2] = ef_h
            if zyx_int[ik, 2] <= ef_h:
                zyx_int[ik, 2] = ef_h
            if zyy_int[ik, 2] <= ef_h:
                zyy_int[ik, 2] = ef_h

        # Calculate B fields using ITF
        for i in range(0, stat):

            bx_1 = (((
                (zxx_int[:, 0] + np.random.standard_normal() * zxx_int[:, 2]) +
                (zxx_int[:, 1] + np.random.standard_normal() * zxx_int[:, 2]) *
                1j
            ) * reg_refbx_fft) + ((
                (zxy_int[:, 0] + np.random.standard_normal() * zxy_int[:, 2]) +
                (zxy_int[:, 1] + np.random.standard_normal() * zxy_int[:, 2]) *
                1j) * reg_refby_fft))

            by_1 = (((
                (zyx_int[:, 0] + np.random.standard_normal() * zyx_int[:, 2]) +
                (zyx_int[:, 1] + np.random.standard_normal() * zyx_int[:, 2]) *
                1j
            ) * reg_refbx_fft) + ((
                (zyy_int[:, 0] + np.random.standard_normal() * zyy_int[:, 2]) +
                (zyy_int[:, 1] + np.random.standard_normal() * zyy_int[:, 2]) *
                1j) * reg_refby_fft))

            bx_calc[:, i] = ifft(bx_1 + np.conj(np.roll(bx_1[::-1], 1))).real
            by_calc[:, i] = ifft(by_1 + np.conj(np.roll(by_1[::-1], 1))).real

        # Define mean value
        me_bx = np.zeros(reg_refbx.shape[0])
        me_by = np.zeros(reg_refbx.shape[0])

        for i in range(0, reg_refbx.shape[0]):
            me_bx[i] = bx_calc[i, :].mean()
            me_by[i] = by_calc[i, :].mean()

        # Calculate standard deviation
        bx_s = np.zeros(reg_refbx.shape[0])
        by_s = np.zeros(reg_refbx.shape[0])

        for i in range(0, reg_refbx.shape[0]):
            bx_s[i] = bx_calc[i, :].std()
            by_s[i] = by_calc[i, :].std()

        # Define the output vectors
        tf_bx = np.array([np.copy(me_bx)]).T
        tf_by = np.array([np.copy(me_by)]).T
        std_bx = np.array([np.copy(bx_s)]).T
        std_by = np.array([np.copy(by_s)]).T

        # Arrays with 1) computed b fields and 2) standard deviation
        c[:, ip, :] = np.hstack((tf_bx, tf_by))
        std_c[:, ip, :] = np.hstack((std_bx, std_by))

    return (c, std_c)
def read_magnetics(in_path, sites, mag_path, secs_path, samp, hi, low, length,
                   storm, var):
    """ Read magnetic time series from input folder
		
		Parameters
		-----------
		in_path = Folder with input parameters
		sites = Name of the site
         mag_path = Folder with magnetic fields time series
         secs_path = Folder with inputs - outputs for SECS interpolation
         samp = Sampling rate (seconds)
         hi = maximum period to analyse (seconds)
         low = minimum period to analyse (seconds)
         length = length of the time series
         storm = Folder with input magnetic time series
         var = if (1) write the magnetic time series

		Returns
		-----------
		mag_s = Magnetic time series
		s = Name of the site

		-----------------------------------------------------------------
	"""

    # Check the magnetic observatories used in the experiment
    s, s_lat, s_lon = read_co(in_path + str(sites))
    mag_s = np.zeros([length, len(s), 2])

    # Read the time series for these magnetic fields
    for ip in range(0, len(s)):
        mag = str(s[ip])

        x_input = str(mag_path) + str(mag) + "Bx.txt"
        y_input = str(mag_path) + str(mag) + "By.txt"

        f = open(x_input, 'r')
        dif_bx = np.loadtxt(f)
        f.close()

        f = open(y_input, 'r')
        dif_by = np.loadtxt(f)
        f.close()

        # Detrend the time series
        dif_bx = scipy.signal.detrend(dif_bx)
        dif_by = scipy.signal.detrend(dif_by)

        # compute FFT
        perd, dif_bx_fft, dif_by_fft = scr_fft(dif_bx, dif_by, samp)

        # Select periods of interest
        factor = np.ones(perd.shape[0])

        # bypass the band filter - LJW 2019-04-09

        for i, v in enumerate(perd):
            if (v < low) or (v > hi):
                factor[i] = 0

        bx_cfft = (factor * dif_bx_fft)
        by_cfft = (factor * dif_by_fft)

        # Compute ifft
        bx_c = np.real(
            np.array(ifft(bx_cfft + np.conj(np.roll(bx_cfft[::-1], 1)))))

        by_c = np.real(
            np.array(ifft(by_cfft + np.conj(np.roll(by_cfft[::-1], 1)))))

        # Define the vector of magnetic field data
        ax = np.array([bx_c]).T
        ay = np.array([by_c]).T
        mag_s[:, ip, :] = np.hstack((ax, ay))

    # Write the magnetic data to be used by the SECS interpolation algorithm
    if var == 1:
        for ip in range(0, len(s)):
            mag = str(s[ip])
            w_path = (str(secs_path) + "SECS_" + str(mag) + str(storm) +
                      ".dat")

            f_id = open(w_path, 'w+')
            np.savetxt(f_id, mag_s[:, ip, :], fmt=['%15.5f', '%15.5f'])
            f_id.close()

    return (mag_s, s)
# The NumPy names used below (linspace, arcsin, sqrt, pi, exp, outer, ifft, np)
# are assumed to come from NumPy; the snippet itself only imports fftshift.
import numpy as np
from numpy import arcsin, exp, linspace, outer, pi, sqrt
from numpy.fft import ifft
from numpy.fft.helper import fftshift
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rcParams


# Raw string so the backslashes in the Windows path are not treated as escapes
rcParams['animation.convert_path'] = r"C:\Program Files\ImageMagick-6.9.3\convert.exe"

nx = 5001
nfreq = 2048
x = linspace(-20, 20, nx)
omega = linspace(0, 4, nfreq)
kappa = 2*arcsin(abs(omega/2) + 0j)
ricker_amp = sqrt(pi)*4*omega**2*exp(-omega**2)
TX = exp(1j*outer(ricker_amp*kappa, x))
X = ifft(TX, axis=0)
magX = np.real(X)


def update(k):
    ax.cla()
    plt.plot(x, magX[k, :])
    plt.ylim(-1, 2)


nframes = int(nfreq/2)
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ani = animation.FuncAnimation(fig, update, frames=range(nframes), blit=False,
                              repeat=False)
def compute_e_fields(sb, std_sb, tf_path, e_site, b_site, samp, hi, low, ef_tf,
                     e_nvpwa, stat):
    """ Compute E fields using magnetic time series from SECS
		
		Parameters
		-----------
         sb = magnetic time series
         std_sb = standard deviation of the magnetic time series
         tf_path = Folder with electromagnetic tensor relationships
         e_site = Name of the site to compute electric fields
         b_site = Name of the site for which the magnetic fields where
                  used to compute electric fields
         samp = Sampling rate (seconds)
         hi = Maximum period to analyse (seconds)
         low = Minimum period to analyse (seconds)
         ef_tf = Error floor for the MT and quasi-MT tensor relationships
         e_ncpwa =  Error floor for the Non-plane wave approximation
         stat = Statistics for error propagation

         
		Returns
		-----------
         tf_ex = electric time series x component (mean value)
         tf_ey = electric time series y component (mean value)
         std_ex = standard deviation of the electric time series x component
         std_ey = standard deviation of the electric time series y component
         
         -----------------------------------------------------------
	"""
    ###################################################################
    # Read magnetic data and convert from nT to T
    s_bx = sb[:, 0] * 1e-9
    s_by = sb[:, 1] * 1e-9
    std_s_bx = std_sb[:, 0] * 1e-9
    std_s_by = std_sb[:, 1] * 1e-9

    ###################################################################
    # Compute electric fields
    s_bx = scipy.signal.detrend(s_bx)
    s_by = scipy.signal.detrend(s_by)

    # Tukey window
    window = scipy.signal.tukey(s_by.shape[0], 0.1)
    s_bx = window * s_bx
    s_by = window * s_by

    # get Frequencies
    freqB = np.fft.fftfreq(s_bx.shape[0], d=samp)
    for i in range(0, len(freqB)):
        if freqB[i] == 0:
            freqB[i] = 1e-99

    perB = freqB**-1

    # Read tensors
    filename = str(tf_path) + "E" + str(e_site) + "B" + str(b_site) + "_s.j"
    f = open(filename, 'r')
    data = f.readlines()
    f.close()

    for index, line in enumerate(data):
        if line.startswith("ZXX"):
            index_zxx = index
        if line.startswith("ZXY"):
            index_zxy = index
        if line.startswith("ZYX"):
            index_zyx = index
        if line.startswith("ZYY"):
            index_zyy = index
        if line.startswith("TZX"):
            index_tzx = index
        if line.startswith("TZY"):
            index_tzy = index

    data_zxx = data[index_zxx + 2:index_zxy]
    zxx = np.loadtxt(data_zxx)
    data_zxy = data[index_zxy + 2:index_zyx]
    zxy = np.loadtxt(data_zxy)
    data_zyx = data[index_zyx + 2:index_zyy]
    zyx = np.loadtxt(data_zyx)
    data_zyy = data[index_zyy + 2:index_tzx]
    zyy = np.loadtxt(data_zyy)
    per_z = zxx[:, 0]

    # Select the periods we are interested in
    factor = np.ones(perB.shape[0])
    zxx_int = np.zeros([perB.shape[0], 3])
    zxy_int = np.zeros([perB.shape[0], 3])
    zyx_int = np.zeros([perB.shape[0], 3])
    zyy_int = np.zeros([perB.shape[0], 3])

    for i, v in enumerate(perB):
        if (v < low) or (v > hi):
            factor[i] = 0

    for i in range(0, 3):
        zxx_int[:, i] = np.interp(perB, per_z, zxx[:, i + 1]) * factor
        zxy_int[:, i] = np.interp(perB, per_z, zxy[:, i + 1]) * factor
        zyx_int[:, i] = np.interp(perB, per_z, zyx[:, i + 1]) * factor
        zyy_int[:, i] = np.interp(perB, per_z, zyy[:, i + 1]) * factor

    # Calculate and propagate errors
    ex_calc = np.zeros([s_bx.shape[0], stat])
    ey_calc = np.zeros([s_bx.shape[0], stat])

    # Calculate error floor
    zzz_det = np.sqrt(
        np.abs(((zxy_int[:, 0] + zxy_int[:, 1] * 1j) *
                (zyx_int[:, 0] + zyx_int[:, 1] * 1j)) -
               ((zxx_int[:, 0] + zxx_int[:, 1] * 1j) *
                (zyy_int[:, 0] + zyy_int[:, 1] * 1j))))

    for ik in range(0, len(zxx_int)):
        if zxx_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zxx_int[ik, 2] = zzz_det[ik] * ef_tf
        if zxy_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zxy_int[ik, 2] = zzz_det[ik] * ef_tf
        if zyx_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zyx_int[ik, 2] = zzz_det[ik] * ef_tf
        if zyy_int[ik, 2] <= zzz_det[ik] * ef_tf:
            zyy_int[ik, 2] = zzz_det[ik] * ef_tf

    for i in range(0, stat):
        # Compute the electrics
        s_bx_fft = np.fft.fftpack.fft(s_bx +
                                      np.random.standard_normal(len(s_bx)) *
                                      std_s_bx)
        s_by_fft = np.fft.fftpack.fft(s_by +
                                      np.random.standard_normal(len(s_by)) *
                                      std_s_by)

        ex_1 = (((
            (zxx_int[:, 0] + np.random.standard_normal() * zxx_int[:, 2]) +
            (zxx_int[:, 1] + np.random.standard_normal() * zxx_int[:, 2]) * 1j
        ) * (s_bx_fft)) + ((
            (zxy_int[:, 0] + np.random.standard_normal() * zxy_int[:, 2]) +
            (zxy_int[:, 1] + np.random.standard_normal() * zxy_int[:, 2]) * 1j)
                           * (s_by_fft)))

        ey_1 = (((
            (zyx_int[:, 0] + np.random.standard_normal() * zyx_int[:, 2]) +
            (zyx_int[:, 1] + np.random.standard_normal() * zyx_int[:, 2]) * 1j
        ) * (s_bx_fft)) + ((
            (zyy_int[:, 0] + np.random.standard_normal() * zyy_int[:, 2]) +
            (zyy_int[:, 1] + np.random.standard_normal() * zyy_int[:, 2]) * 1j)
                           * (s_by_fft)))

        # Add error associated with the non-validity of the plane wave approx.
        ex_1 = ex_1 + np.random.standard_normal() * ex_1 * e_nvpwa
        ey_1 = ey_1 + np.random.standard_normal() * ey_1 * e_nvpwa

        # Compute ifft
        ex_calc[:, i] = (ifft(ex_1 + np.conj(np.roll(ex_1[::-1], 1))).real *
                         (1000. / (4 * np.pi * 1e-7)))
        ey_calc[:, i] = (ifft(ey_1 + np.conj(np.roll(ey_1[::-1], 1))).real *
                         (1000. / (4 * np.pi * 1e-7)))

    # Calculate the electric fields (mean value)
    mex = np.zeros(s_bx.shape[0])
    mey = np.zeros(s_bx.shape[0])

    for i in range(s_bx.shape[0]):
        mex[i] = ex_calc[i, :].mean()
        mey[i] = ey_calc[i, :].mean()

    # Calculate standard deviation / errorbars
    ex_s = np.zeros(s_bx.shape[0])
    ey_s = np.zeros(s_bx.shape[0])

    for i in range(s_bx.shape[0]):
        ex_s[i] = ex_calc[i, :].std()
        ey_s[i] = ey_calc[i, :].std()

    # Define outputs
    tf_ex = np.array(np.copy(mex))
    tf_ey = np.array(np.copy(mey))
    std_ex = np.array(np.copy(ex_s))
    std_ey = np.array(np.copy(ey_s))

    return (tf_ex, tf_ey, std_ex, std_ey)
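# The per-sample loops above that average over the `stat` Monte Carlo
# realisations could equivalently use NumPy's axis argument; a tiny
# self-contained illustration with a stand-in array:
import numpy as np

ex_calc = np.random.standard_normal((1000, 50))   # shape (n_samples, stat), as above
mex = ex_calc.mean(axis=1)                        # same result as the element-wise .mean() loop
ex_s = ex_calc.std(axis=1)                        # same result as the element-wise .std() loop
print(mex.shape, ex_s.shape)                      # (1000,) (1000,)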
Example 25
    print 'Multiply vectors...'
    t0 = time.clock()
    tA=tA**M[i]#  % O(n)
    #################
    ptA = ptA*tA#  % O(n)#this is where it is messing UP
    #################
    t1 = time.clock()
    t_mult=t1-t0


print 'Time for FFT: %4.2f s'%t_fft
print 'Time for multiplications: %4.2f s'%t_mult

print 'Calculate IFFT...'
t0=time.clock()
ptA=F.ifft(ptA).real#;  % O(nlogn)

print 'Time for IFFT: %4.2f s'%(time.clock()-t0)

print 'Plotting...'
t0=time.clock()


start = (FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED
stop = WINDOW_SIZE - 1

MA=N.linspace((FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED, WINDOW_SIZE-1)

ind=N.where(ptA>CUTOFF)[0]

x = MA[ind]
def isotope(fVec,DAbund):
	'''
	%
	% Calculates isotopic distributions including isotopic fine structure
	% of molecules using FFT and various scaling 'tricks'. Easily adopted
	% to molecules of any elemental composition (by altering MAX_ELEMENTS
	% and the nuclide matrix A). To simulate spectra, convolute with peak
	% shape using FFT.
	%
	% (C) 1999 by Magnus Palmblad, Division of Ion Physics, Uppsala Univ.
	% Acknowledgements:
	% Lars Larsson-Cohn, Dept. of Mathematical Statistics, Uppsala Univ.,
	% for help on theory of convolutions and FFT.
	% Jan Axelsson, Div. of Ion Physics, Uppsala Univ. for comments and ideas
	%
	% Contact Magnus Palmblad at [email protected] if you should
	% have any questions or comments.
	%

	Converted to Python 1/10/08 by
	Brian H. Clowers [email protected]

	October 31, 2014
	Added Phosphorous and chemical formula parsing
	Added conditional specification of stable isotope composition
	Ben Bowen, [email protected]

	fVec is a vector representing the chemical formula including deuterium
	# [H, C, N, O, S, P, D]
	DAbund is the amount of deuterium [0-1], 0.05 is typical
	'''
	import numpy as np
	import numpy.fft.fftpack as F
	# import time
	# import pylab as P


	def next2pow(x):
	    return 2**int(np.ceil(np.log(float(x))/np.log(2.0)))

	scaleFactor = 100000
	MAX_ELEMENTS=7+1  # add 1 due to mass correction 'element'
	MAX_ISOTOPES=4    # maximum # of isotopes for one element
	CUTOFF=1e-4    # relative intensity cutoff for plotting

	WINDOW_SIZE = 500
	#WINDOW_SIZE=input('Window size (in Da) ---> ');

	#RESOLUTION=input('Resolution (in Da) ----> ');  % mass unit used in vectors
	RESOLUTION = 0.5
	if RESOLUTION < 0.00001:#  % minimal mass step allowed
	  RESOLUTION = 0.00001
	elif RESOLUTION > 0.5:  # maximal mass step allowed
	  RESOLUTION = 0.5

	R=0.00001/RESOLUTION#  % R is used to scale nuclide masses (see below)

	WINDOW_SIZE=WINDOW_SIZE/RESOLUTION;   # convert window size to new mass units
	WINDOW_SIZE=next2pow(WINDOW_SIZE);  # fast radix-2 fast-Fourier transform algorithm

	if WINDOW_SIZE < np.round(496708*R)+1:
	  WINDOW_SIZE = next2pow(np.round(496708*R)+1)  # just to make sure window is big enough

	# print 'Vector size: 1x%d'%WINDOW_SIZE

	#H378 C254 N65 O75 S6
	
	# M=np.array([378,254,65,75,6,0]) #% empiric formula, e.g. bovine insulin
	M=np.array(fVec) #% empiric formula, e.g. bovine insulin

	# isotopic abundances stored in matrix A (one row for each element)
	A=np.zeros((MAX_ELEMENTS,MAX_ISOTOPES,2));

	A[0][0,:] = [100783,0.9998443]#                 % 1H
	A[0][1,:] = [201410,0.0001557]#                 % 2H
	A[1][0,:] = [100000,0.98889]#                   % 12C
	A[1][1,:] = [200336,0.01111]#                   % 13C
	A[2][0,:] = [100307,0.99634]#                   % 14N
	A[2][1,:] = [200011,0.00366]#                   % 15N
	A[3][0,:] = [99492,0.997628]#                  % 16O
	A[3][1,:] = [199913,0.000372]#                  % 17O
	A[3][2,:] = [299916,0.002000]#                  % 18O
	A[4][0,:] = [97207,0.95018]#                   % 32S
	A[4][1,:] = [197146,0.00750]#                   % 33S
	A[4][2,:] = [296787,0.04215]#                   % 34S
	A[4][3,:] = [496708,0.00017]#                   % 36S
	A[5][0,:] = [97376,1.0]# Phosphorous
	A[6][0,:] = [100783,1.0-DAbund]#                 % 1H
	A[6][1,:] = [201410,DAbund]#                 % 2H
	A[7][0,:] = [100000,1.00000]#                   % for shifting mass so that Mmi is
	#                                             % near left limit of window
	mass_removed_vec = [0,11,13,15,31,30,0,-1]
	monoisotopic = 0.0
	for i,e in enumerate(fVec):
		monoisotopic = monoisotopic + ( (mass_removed_vec[i]*scaleFactor+A[i][0,0])*e / scaleFactor)

	Mmi=np.array([np.round(100783*R), np.round(100000*R),\
	             np.round(100307*R), np.round(99492*R), np.round(97207*R), np.round(97376*R), np.round(100783*R), 0])*M#  % (Virtual) monoisotopic mass in new units
	Mmi = Mmi.sum()
	#% mass shift so Mmi is in left limit of window:
	#print "Mmi",Mmi
	#print "Window", WINDOW_SIZE
	FOLDED=np.floor(Mmi/(WINDOW_SIZE-1))+1#  % folded FOLDED times (always one folding due to shift below)

	#% shift distribution to 1 Da from lower window limit:
	M[MAX_ELEMENTS-1]=np.ceil(((WINDOW_SIZE-1)-np.mod(Mmi,WINDOW_SIZE-1)+np.round(100000*R))*RESOLUTION)
	MASS_REMOVED=np.array(mass_removed_vec)*M#';  % correction for 'virtual' elements and mass shift
	MASS_REMOVED = MASS_REMOVED.sum()

	ptA=np.ones(WINDOW_SIZE);
	t_fft=0
	t_mult=0

	for i in xrange(MAX_ELEMENTS):
	    tA=np.zeros(WINDOW_SIZE)
	    for j in xrange(MAX_ISOTOPES):
	        if A[i][j,0] != 0:
	            tA[int(np.round(A[i][j,0]*R))]=A[i][j,1]  # put isotopic distribution in tA (integer index required by newer NumPy)

	    tA=F.fft(tA) # FFT along elements isotopic distribution  O(nlogn)
	    tA=tA**M[i]#  % O(n)
	    ptA = ptA*tA#  % O(n)#this is where it is messing UP

	ptA=F.ifft(ptA).real#;  % O(nlogn)

	start = (FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED
	stop = WINDOW_SIZE - 1

	MA=np.linspace((FOLDED*(WINDOW_SIZE-1)+1)*RESOLUTION+MASS_REMOVED,(FOLDED+1)*(WINDOW_SIZE-1)*RESOLUTION+MASS_REMOVED, WINDOW_SIZE-1)

	ind=np.where(ptA>CUTOFF)[0]

	x = MA[ind]
	y = ptA[ind]

	for i,xi in enumerate(x):
		x[i] = monoisotopic + (i*1.003355)


	return x,y,monoisotopic
Example 27
import numpy as N
import numpy.fft.fftpack as F  # older NumPy path used by these snippets; numpy.fft on recent versions
import pylab as P

MAX_ELEMENTS = 5  # H, C, N, O, S (as in the fuller copy of this script above)
MAX_MASS = 2**13  #% fast radix-2 fast-Fourier transform algorithm is used

M = N.array([378, 234, 65, 75, 6])  #% empirical formula, e.g. bovine insulin

A = N.zeros((MAX_ELEMENTS,
             MAX_MASS))  #                 % isotopic abundancies stored in A

A[0, 1:3] = [0.9998443, 0.0001557]  #                 % H
A[1, 12:14] = [0.98889, 0.01111]  #                   % C
A[2, 14:16] = [0.99634, 0.00366]  #                   % N
A[3, 16:19] = [0.997628, 0.000372, 0.002000]  #        % O
A[4, 32:37] = [0.95018, 0.00750, 0.04215, 0,
               0.00017]  # % S (extend to other elements as needed)

tA = F.fft(
    A, axis=1
)  #                     % FFT along each element's isotopic distribution

ptA = N.ones(MAX_MASS)
for i in xrange(MAX_ELEMENTS - 1):
    ptA = ptA * (tA[i, :]**M[i]
                 )  #;         % multiply transforms (elementwise)

riptA = F.ifft(ptA).real  #              % inverse FFT to get convolutions

id = N.zeros(MAX_MASS)
id[0:MAX_MASS - 1] = riptA[1:MAX_MASS]  #; % shift to real mass

print id.argmax(), id.max()
P.plot(riptA)
P.show()