Example #1
def allpsd(home, project_name, run_name, run_number, GF_list, d_or_s, v_or_d,
           decimate, lowpass):
    '''
    Compute PSDs for either all the synthetics of a particular run or all the data
    '''

    from numpy import genfromtxt, where, log10, savez
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate
    import nitime.algorithms as tsa

    #Decide what I'm going to work on
    sta = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                     usecols=0,
                     dtype='S')
    gf = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                    usecols=[4, 5],
                    dtype='f')
    datapath = home + project_name + '/data/waveforms/'
    synthpath = home + project_name + '/output/inverse_models/waveforms/'
    outpath = home + project_name + '/analysis/frequency/'
    if v_or_d.lower() == 'd':
        kgf = 0  #disp
        datasuffix = 'kdisp'
        synthsuffix = 'disp'
    elif v_or_d.lower() == 'v':
        kgf = 1  #vel
        datasuffix = 'kvel'
        synthsuffix = 'vel'
    if d_or_s.lower() == 'd':  #We're working on observed data
        path = datapath
        suffix = datasuffix
    else:  #We're looking at synthetics from a certain run
        path = synthpath
        suffix = synthsuffix
    i = where(gf[:, kgf] == 1)[0]
    for k in range(len(i)):
        print('Working on ' + sta[i[k]])
        if d_or_s.lower() == 'd':  #Read data
            n = read(path + sta[i[k]] + '.' + suffix + '.n')
            e = read(path + sta[i[k]] + '.' + suffix + '.e')
            u = read(path + sta[i[k]] + '.' + suffix + '.u')
            outname = sta[i[k]] + '.' + suffix + '.psd'
            if lowpass is not None:
                fsample = 1. / e[0].stats.delta
                e[0].data = lfilter(e[0].data, lowpass, fsample, 10)
                n[0].data = lfilter(n[0].data, lowpass, fsample, 10)
                u[0].data = lfilter(u[0].data, lowpass, fsample, 10)
            if decimate is not None:
                n[0] = stdecimate(n[0], decimate)
                e[0] = stdecimate(e[0], decimate)
                u[0] = stdecimate(u[0], decimate)
        else:  #Read synthetics
            n = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.n.sac')
            e = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.e.sac')
            u = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.u.sac')
            outname = run_name + '.' + run_number + '.' + sta[
                i[k]] + '.' + suffix + '.psd'
        #Compute spectra
        fn, npsd, nu = tsa.multi_taper_psd(n[0].data,
                                           Fs=1. / n[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        fe, epsd, nu = tsa.multi_taper_psd(e[0].data,
                                           Fs=1. / e[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        fu, upsd, nu = tsa.multi_taper_psd(u[0].data,
                                           Fs=1. / u[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        #Convert to dB
        npsd = 10 * log10(npsd)
        epsd = 10 * log10(epsd)
        upsd = 10 * log10(upsd)
        #Write to file
        savez(outpath + outname,
              fn=fn,
              fe=fe,
              fu=fu,
              npsd=npsd,
              epsd=epsd,
              upsd=upsd)
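
A minimal usage sketch for allpsd, assuming the mudpy and nitime packages are installed and the standard mudpy project directory layout already exists; the home path, project name, run identifiers, and GF list file name below are all placeholders:

home = '/Users/me/Slip_inv/'        # placeholder project root
project_name = 'my_project'         # placeholder project name
run_name = 'run'                    # placeholder inversion run name
run_number = '0001'                 # placeholder run number
GF_list = 'stations.gflist'         # placeholder GF control file

# PSDs of all observed displacement data, no lowpass or decimation
allpsd(home, project_name, run_name, run_number, GF_list,
       d_or_s='d', v_or_d='d', decimate=None, lowpass=None)

# PSDs of the synthetics for the same run, lowpassed (corner assumed to be in Hz)
allpsd(home, project_name, run_name, run_number, GF_list,
       d_or_s='s', v_or_d='d', decimate=None, lowpass=0.5)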
Example #2
def allcoherence(home, project_name, run_name, run_number, GF_list, v_or_d,
                 decimate, lowpass):
    '''
    Compute coherence between the observed data and the synthetics of a particular run
    '''

    from numpy import genfromtxt, where, savez, c_, pi
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate
    import nitime.algorithms as tsa

    #Regularized coherence
    eps = 0.000001
    al = 10.
    #Decide what I'm going to work on
    sta = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                     usecols=0,
                     dtype='S')
    gf = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                    usecols=[4, 5],
                    dtype='f')
    datapath = home + project_name + '/data/waveforms/'
    synthpath = home + project_name + '/output/inverse_models/waveforms/'
    outpath = home + project_name + '/analysis/frequency/'
    if v_or_d.lower() == 'd':
        kgf = 0  #disp
        datasuffix = 'kdisp'
        synthsuffix = 'disp'
    elif v_or_d.lower() == 'v':
        kgf = 1  #vel
        datasuffix = 'kvel'
        synthsuffix = 'vel'
    i = where(gf[:, kgf] == 1)[0]
    for k in range(len(i)):
        print('Working on ' + sta[i[k]])
        #Read data
        n = read(datapath + sta[i[k]] + '.' + datasuffix + '.n')
        e = read(datapath + sta[i[k]] + '.' + datasuffix + '.e')
        u = read(datapath + sta[i[k]] + '.' + datasuffix + '.u')
        if lowpass is not None:
            fsample = 1. / e[0].stats.delta
            e[0].data = lfilter(e[0].data, lowpass, fsample, 10)
            n[0].data = lfilter(n[0].data, lowpass, fsample, 10)
            u[0].data = lfilter(u[0].data, lowpass, fsample, 10)
        if decimate is not None:
            n[0] = stdecimate(n[0], decimate)
            e[0] = stdecimate(e[0], decimate)
            u[0] = stdecimate(u[0], decimate)
        #Read synthetics
        nsyn = read(synthpath + run_name + '.' + run_number + '.' + sta[i[k]] +
                    '.' + synthsuffix + '.n.sac')
        esyn = read(synthpath + run_name + '.' + run_number + '.' + sta[i[k]] +
                    '.' + synthsuffix + '.e.sac')
        usyn = read(synthpath + run_name + '.' + run_number + '.' + sta[i[k]] +
                    '.' + synthsuffix + '.u.sac')
        #What's the sampling rate correction?
        Fs = 1. / n[0].stats.delta
        Fsc = Fs / (2. * pi)
        #Compute coherence
        data = c_[n[0].data, nsyn[0].data].T
        fn, cn = tsa.cohere.coherence_regularized(data,
                                                  epsilon=eps,
                                                  alpha=al,
                                                  csd_method={
                                                      'this_method':
                                                      'multi_taper_csd',
                                                      'adaptive': True,
                                                      'low_bias': True
                                                  })
        fn = fn * Fsc
        cn = cn[1, 0, :].real
        data = c_[e[0].data, esyn[0].data].T
        fe, ce = tsa.cohere.coherence_regularized(data,
                                                  epsilon=eps,
                                                  alpha=al,
                                                  csd_method={
                                                      'this_method':
                                                      'multi_taper_csd',
                                                      'adaptive': True,
                                                      'low_bias': True
                                                  })
        fe = fe * Fsc
        ce = ce[1, 0, :].real
        data = c_[u[0].data, usyn[0].data].T
        fu, cu = tsa.cohere.coherence_regularized(data,
                                                  epsilon=eps,
                                                  alpha=al,
                                                  csd_method={
                                                      'this_method':
                                                      'multi_taper_csd',
                                                      'adaptive': True,
                                                      'low_bias': True
                                                  })
        fu = fu * Fsc
        cu = cu[1, 0, :].real
        #Write to file
        outname = run_name + '.' + run_number + '.' + sta[
            i[k]] + '.' + synthsuffix + '.coh'
        savez(outpath + outname, fn=fn, fe=fe, fu=fu, cn=cn, ce=ce, cu=cu)
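
A usage sketch along the same lines; the paths, run identifiers, and the station code 'STA1' are placeholders. Since savez appends '.npz' to the file name, each station's output ends in '.coh.npz':

from numpy import load
import matplotlib.pyplot as plt

# Coherence between observed displacement data and the synthetics of one run
allcoherence(home='/Users/me/Slip_inv/', project_name='my_project',
             run_name='run', run_number='0001', GF_list='stations.gflist',
             v_or_d='d', decimate=None, lowpass=None)

# Inspect one result file ('STA1' is an illustrative station code)
coh = load('/Users/me/Slip_inv/my_project/analysis/frequency/run.0001.STA1.disp.coh.npz')
plt.semilogx(coh['fn'], coh['cn'])   # north-component coherence vs. frequency
plt.xlabel('Frequency (Hz)')
plt.ylabel('Coherence')
plt.show()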
Example #3
def allcoherence(home,project_name,run_name,run_number,GF_list,v_or_d,decimate,lowpass):
    '''
    Compute coherence between the observed data and the synthetics of a particular run
    '''
    
    from numpy import genfromtxt,where,savez,c_,pi
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate 
    import nitime.algorithms as tsa
    
    #Regularized coherence
    eps=0.000001
    al=10.
    #Decide what I'm going to work on
    sta=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=0,dtype='S')
    gf=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=[4,5],dtype='f')
    datapath=home+project_name+'/data/waveforms/'
    synthpath=home+project_name+'/output/inverse_models/waveforms/'
    outpath=home+project_name+'/analysis/frequency/'
    if v_or_d.lower()=='d':
        kgf=0 #disp
        datasuffix='kdisp'
        synthsuffix='disp'
    elif v_or_d.lower()=='v':
        kgf=1 #vel
        datasuffix='kvel'
        synthsuffix='vel'
    i=where(gf[:,kgf]==1)[0]
    for k in range(len(i)):
        print('Working on '+sta[i[k]])
        #Read data
        n=read(datapath+sta[i[k]]+'.'+datasuffix+'.n')
        e=read(datapath+sta[i[k]]+'.'+datasuffix+'.e')
        u=read(datapath+sta[i[k]]+'.'+datasuffix+'.u')
        if lowpass is not None:
            fsample=1./e[0].stats.delta
            e[0].data=lfilter(e[0].data,lowpass,fsample,10)
            n[0].data=lfilter(n[0].data,lowpass,fsample,10)
            u[0].data=lfilter(u[0].data,lowpass,fsample,10)
        if decimate is not None:
            n[0]=stdecimate(n[0],decimate)
            e[0]=stdecimate(e[0],decimate)
            u[0]=stdecimate(u[0],decimate)
        #Read synthetics
        nsyn=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.n.sac')
        esyn=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.e.sac')
        usyn=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.u.sac')
        #What's the sampling rate correction?
        Fs=1./n[0].stats.delta
        Fsc=Fs/(2.*pi)
        #Compute coherence
        data=c_[n[0].data,nsyn[0].data].T
        fn,cn=tsa.cohere.coherence_regularized(data,epsilon=eps,alpha=al,
            csd_method={'this_method':'multi_taper_csd','adaptive':True,'low_bias':True})
        fn=fn*Fsc
        cn=cn[1,0,:].real
        data=c_[e[0].data,esyn[0].data].T
        fe,ce=tsa.cohere.coherence_regularized(data,epsilon=eps,alpha=al,
            csd_method={'this_method':'multi_taper_csd','adaptive':True,'low_bias':True})
        fe=fe*Fsc
        ce=ce[1,0,:].real
        data=c_[u[0].data,usyn[0].data].T
        fu,cu=tsa.cohere.coherence_regularized(data,epsilon=eps,alpha=al,
            csd_method={'this_method':'multi_taper_csd','adaptive':True,'low_bias':True})
        fu=fu*Fsc
        cu=cu[1,0,:].real
        #Write to file
        outname=run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.coh'
        savez(outpath+outname,fn=fn,fe=fe,fu=fu,cn=cn,ce=ce,cu=cu) 
Example #4
File: view.py Project: degoldbe/mudpyseg
def synthetics(home,project_name,run_name,run_number,gflist,vord,decimate,lowpass,t_lim,sort,scale,k_or_g):
    '''
    Plot synthetics vs real data
    
    gflist: The GF control file that decides what to plot/not plot
    datapath
    '''
    from obspy import read
    from numpy import genfromtxt,where,argsort
    import matplotlib.pyplot as plt
    import matplotlib
    from mudpy.green import stdecimate 
    from mudpy.forward import lowpass as lfilter
    
    matplotlib.rcParams.update({'font.size': 14})
    
    #Decide what to plot
    sta=genfromtxt(home+project_name+'/data/station_info/'+gflist,usecols=0,dtype='S')
    lon=genfromtxt(home+project_name+'/data/station_info/'+gflist,usecols=[1],dtype='f')
    lat=genfromtxt(home+project_name+'/data/station_info/'+gflist,usecols=[2],dtype='f')
    gf=genfromtxt(home+project_name+'/data/station_info/'+gflist,usecols=[4,5],dtype='f')
    datapath=home+project_name+'/data/waveforms/'
    synthpath=home+project_name+'/output/inverse_models/waveforms/'
    if vord.lower()=='d':
        kgf=0 #disp
        if k_or_g.lower()=='kal':
            #datasuffix='kdisp'
            datasuffix='HX'
        else:
            #datasuffix='disp'
            datasuffix='HX'
        synthsuffix='disp'
    elif vord.lower()=='v':
        kgf=1 #vel
        datasuffix='kvel'
        synthsuffix='vel'
    #Decide on sorting
    i=where(gf[:,kgf]==1)[0]  
    if sort.lower()=='lon':
        j=argsort(lon[i])[::-1]
        i=i[j]
    elif sort.lower()=='lat':
        j=argsort(lat[i])[::-1] 
        i=i[j]
    nsta=len(i)
    fig, axarr = plt.subplots(nsta, 3)  
    for k in range(len(i)):
        n=read(datapath+sta[i[k]]+'.'+datasuffix+'.n')
        e=read(datapath+sta[i[k]]+'.'+datasuffix+'.e')
        u=read(datapath+sta[i[k]]+'.'+datasuffix+'.u')
        if lowpass is not None:
            fsample=1./e[0].stats.delta
            e[0].data=lfilter(e[0].data,lowpass,fsample,10)
            n[0].data=lfilter(n[0].data,lowpass,fsample,10)
            u[0].data=lfilter(u[0].data,lowpass,fsample,10)
        if decimate is not None:
            n[0]=stdecimate(n[0],decimate)
            e[0]=stdecimate(e[0],decimate)
            u[0]=stdecimate(u[0],decimate)
        ns=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.n.sac')
        es=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.e.sac')
        us=read(synthpath+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+synthsuffix+'.u.sac')
        if scale is not None:
            n[0].data=n[0].data/scale
            ns[0].data=ns[0].data/scale
            e[0].data=e[0].data/scale
            es[0].data=es[0].data/scale
            u[0].data=u[0].data/scale
            us[0].data=us[0].data/scale
        #Make plot
        axn=axarr[k,0]
        axe=axarr[k,1]
        axu=axarr[k,2]
        axn.plot(n[0].times(),n[0].data,'k',ns[0].times(),ns[0].data,'r')
        axn.grid(which='both')
        axe.plot(e[0].times(),e[0].data,'k',es[0].times(),es[0].data,'r')
        axe.grid(which='both')
        axu.plot(u[0].times(),u[0].data,'k',us[0].times(),us[0].data,'r')
        axu.grid(which='both')
        axn.set_xlim(t_lim)
        axe.set_xlim(t_lim)
        axu.set_xlim(t_lim)
        axn.yaxis.set_ticklabels([])
        axe.yaxis.set_ticklabels([])
        axu.yaxis.set_ticklabels([])
        axn.yaxis.grid(False)
        axe.yaxis.grid(False)
        axu.yaxis.grid(False)
        axn.yaxis.set_ticks([])
        axe.yaxis.set_ticks([])
        axu.yaxis.set_ticks([])
        
        #Annotations
        trange=t_lim[1]-t_lim[0]
        sign=1.
        if abs(min(n[0].data))>max(n[0].data):
            sign=-1. 
        nmax='%.3f' % (sign*max(abs(n[0].data)))
        sign=1.
        if abs(min(ns[0].data))>max(ns[0].data):
            sign=-1. 
        nsmax='%.3f' % (sign*max(abs(ns[0].data)))
        sign=1.
        nlims=axn.get_ylim()
        nrange=nlims[1]-nlims[0]
        
        if abs(min(e[0].data))>max(e[0].data):
            sign=-1.         
        emax='%.3f' % (sign*max(abs(e[0].data)))
        sign=1.
        if abs(min(es[0].data))>max(es[0].data):
            sign=-1. 
        esmax='%.3f' % (sign*max(abs(es[0].data)))
        sign=1.
        elims=axe.get_ylim()
        erange=elims[1]-elims[0]
        
        if abs(min(u[0].data))>max(u[0].data):
            sign=-1. 
        umax='%.3f' % (sign*max(abs(u[0].data)))
        sign=1.
        if abs(min(us[0].data))>max(us[0].data):
            sign=-1.
        usmax='%.3f' % (sign*max(abs(us[0].data)))
        sign=1.
        ulims=axu.get_ylim()
        urange=ulims[1]-ulims[0]
        
        axn.annotate(nmax,xy=(t_lim[1]-0.3*trange,nlims[0]+0.02*nrange),fontsize=12)
        axe.annotate(emax,xy=(t_lim[1]-0.3*trange,elims[0]+0.02*erange),fontsize=12)
        axu.annotate(umax,xy=(t_lim[1]-0.3*trange,ulims[0]+0.02*urange),fontsize=12)
        axn.annotate(nsmax,xy=(t_lim[1]-0.3*trange,nlims[0]+0.7*nrange),fontsize=12,color='red')
        axe.annotate(esmax,xy=(t_lim[1]-0.3*trange,elims[0]+0.7*erange),fontsize=12,color='red')
        axu.annotate(usmax,xy=(t_lim[1]-0.3*trange,ulims[0]+0.7*urange),fontsize=12,color='red')
        #Station name
        axn.set_ylabel(sta[i[k]],rotation=0)
        if k==0:
            if vord.lower()=='d':
                axn.set_title('North (m)')
                axe.set_title('East (m)')
                axu.set_title('Up (m)')
            else:
                axn.set_title('North (m/s)')
                axe.set_title('East (m/s)')
                axu.set_title('Up (m/s)')
        if k!=len(i)-1:
            axn.xaxis.set_ticklabels([])
            axe.xaxis.set_ticklabels([])
            axu.xaxis.set_ticklabels([])
            xtick=axn.xaxis.get_majorticklocs()
            ix=[1,3,5]
            xtick=xtick[ix]
            xticklabel=['','50','','150','','250','']
        if k==len(i)-1: #Last plot
            axe.set_xlabel('Time (s)')
            axn.xaxis.set_ticklabels(xticklabel)
            axe.xaxis.set_ticklabels(xticklabel)
            axu.xaxis.set_ticklabels(xticklabel)
            #axn.xaxis.set_ticks(xtick)
            #axe.xaxis.set_ticks(xtick)
            #axu.xaxis.set_ticks(xtick)
    plt.subplots_adjust(left=0.2, bottom=0.05, right=0.8, top=0.95, wspace=0, hspace=0)
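
A usage sketch for the plotting routine above; every argument value is a placeholder and the call assumes an interactive matplotlib backend:

import matplotlib.pyplot as plt

synthetics(home='/Users/me/Slip_inv/', project_name='my_project',
           run_name='run', run_number='0001', gflist='stations.gflist',
           vord='d',            # displacement waveforms
           decimate=None, lowpass=None,
           t_lim=[0, 300],      # time window of each trace, in seconds
           sort='lat',          # order the station rows by latitude
           scale=None,          # no amplitude rescaling
           k_or_g='kal')        # note: in this version both branches read the '.HX.*' files
plt.show()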
Example #5
def allpsd(home,project_name,run_name,run_number,GF_list,d_or_s,v_or_d,decimate,lowpass):
    '''
    Compute PSDs for either all the synthetics of a particular run or all the data
    '''
    
    from numpy import genfromtxt,where,log10,savez
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate 
    import nitime.algorithms as tsa
    
    #Decide what I'm going to work on
    sta=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=0,dtype='S')
    gf=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=[4,5],dtype='f')
    datapath=home+project_name+'/data/waveforms/'
    synthpath=home+project_name+'/output/inverse_models/waveforms/'
    outpath=home+project_name+'/analysis/frequency/'
    if v_or_d.lower()=='d':
        kgf=0 #disp
        datasuffix='kdisp'
        synthsuffix='disp'
    elif v_or_d.lower()=='v':
        kgf=1 #vel
        datasuffix='kvel'
        synthsuffix='vel'
    if d_or_s.lower()=='d': #We're working on observed data
        path=datapath
        suffix=datasuffix
    else: #We're looking at synthetics from a certain run
        path=synthpath
        suffix=synthsuffix
    i=where(gf[:,kgf]==1)[0]
    for k in range(len(i)):
        print('Working on '+sta[i[k]])
        if d_or_s.lower()=='d': #Read data
            n=read(path+sta[i[k]]+'.'+suffix+'.n')
            e=read(path+sta[i[k]]+'.'+suffix+'.e')
            u=read(path+sta[i[k]]+'.'+suffix+'.u')
            outname=sta[i[k]]+'.'+suffix+'.psd'
            if lowpass is not None:
                fsample=1./e[0].stats.delta
                e[0].data=lfilter(e[0].data,lowpass,fsample,10)
                n[0].data=lfilter(n[0].data,lowpass,fsample,10)
                u[0].data=lfilter(u[0].data,lowpass,fsample,10)
            if decimate is not None:
                n[0]=stdecimate(n[0],decimate)
                e[0]=stdecimate(e[0],decimate)
                u[0]=stdecimate(u[0],decimate)
        else: #Read synthetics
            n=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.n.sac')
            e=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.e.sac')
            u=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.u.sac')
            outname=run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.psd'
        #Compute spectra
        fn, npsd, nu = tsa.multi_taper_psd(n[0].data,Fs=1./n[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        fe, epsd, nu = tsa.multi_taper_psd(e[0].data,Fs=1./e[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        fu, upsd, nu = tsa.multi_taper_psd(u[0].data,Fs=1./u[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        #Convert to dB
        npsd=10*log10(npsd)
        epsd=10*log10(epsd)
        upsd=10*log10(upsd)
        #Write to file
        savez(outpath+outname,fn=fn,fe=fe,fu=fu,npsd=npsd,epsd=epsd,upsd=upsd)