示例#1
0
文件: GSRT.py 项目: blueray45/GSRT
def run_analysis(filename,mode,method):
    """Read an input model file, compute its transfer function, and save a plot.

    Parameters
    ----------
    filename : str
        Path of the input file parsed by ``IOfile.parsing_input_file``.
    mode :
        Unused here; kept for interface compatibility with callers.
    method : str
        One of 'tf_kramer286_sh', 'tf_knopoff_sh', 'tf_knopoff_sh_adv'.

    Raises
    ------
    ValueError
        If *method* is not a supported method name.  (Previously an unknown
        name fell through the if/elif chain silently and the plotting code
        below read an undefined/stale ``theclass.tf``.)
    """
    click.echo('Reading file : %s'%filename)
    data = IOfile.parsing_input_file(filename)
    click.echo('Creating class...')
    theclass = TFC(data)
    click.echo('Calculating transfer function using %s method'%method)
    supported = ('tf_kramer286_sh', 'tf_knopoff_sh', 'tf_knopoff_sh_adv')
    if method not in supported:
        raise ValueError('unknown method %r; expected one of: %s'
                         % (method, ', '.join(supported)))
    # dispatch by name instead of a silent if/elif chain
    getattr(theclass, method)()

    plt.plot(theclass.freq,np.abs(theclass.tf[0]),label=method)
    plt.xlabel('frequency (Hz)')
    plt.ylabel('Amplification')
    plt.yscale('log')
    plt.xscale('log')
    plt.grid(True,which='both')
    plt.legend(loc='best',fancybox=True,framealpha=0.5)
    plt.autoscale(True,axis='x',tight=True)
    plt.tight_layout()
    plt.savefig('test.png', format='png')
    click.echo(click.style('Calculation has been finished!',fg='green'))
示例#2
0
def get_words_from(file, word_counted):
    """Tokenize the text of *file* and accumulate per-word statistics.

    Updates *word_counted* in place (count, length, vowel/consonant counts,
    per-file occurrence map) and returns it together with a summary dict.
    When the file cannot be read, the failed read result is returned
    unchanged so the caller can inspect it.
    """
    read_result = IOfile.read(file['path'] + file['name'], file['ext'])
    if not read_result[0]:
        # propagate the failed read untouched
        return read_result

    mask = ''.join(get_lang_mask('patterns'))
    word_re = r'\b(?<![0-9]-)[' + mask + '][-' + mask + ']*'
    words = re.findall(word_re, read_result[1].lower())

    file_id = file['_id']
    for token in words:
        entry = word_counted.get(token)
        if entry is None:
            # first sighting of this word anywhere
            word_counted[token] = {
                "word": token,
                "count": 1,
                "len": len(token),
                "vowel": count_chars(token, 'vowels'),
                "consonant": count_chars(token, 'consonants'),
                "files": {
                    file_id: 1
                },
            }
        else:
            entry["count"] += 1
            entry["files"][file_id] = entry["files"].get(file_id, 0) + 1

    summary = {
        'total_words': len(words),
        'uniq_words': len(set(words))
    }
    return word_counted, summary
示例#3
0
def getGroups():
    """Fetch each user's VK group ids and append unseen ones to listGroups.txt.

    Bug fix: the original nested loops compared every known id against every
    fetched id and appended the group once per *non-matching* known id; worse,
    it compared file lines (str) against API ids (int), so the equality test
    never succeeded and every group was appended repeatedly.  Membership is
    now a single ``not in`` test on the string form, and the files are opened
    once with ``with`` instead of once per append.
    """
    # list of people whose group subscriptions should be scanned
    listPeople = IOfile.getListPeople()
    # known group ids, read once; a set gives O(1) membership tests
    with open("listGroups.txt", 'r') as listGroups:
        knownIds = set(listGroups.read().split('\n'))
    with open("listGroups.txt", 'a') as listGroups:
        for person in listPeople:
            # fetch the user's group list through the VK API
            params = {"user_id" : person, "access_token" : token, "count" : "0","v" : "5.73"}
            response = requests.get("https://api.vk.com/method/groups.get", params)
            listUsersGroups = response.json()["response"]["items"]
            for group in listUsersGroups:
                # the file stores ids as text, so compare the string form
                gid = str(group)
                if gid not in knownIds:
                    listGroups.write(gid + '\n')
                    knownIds.add(gid)
def main():
    '''runs all of the key functions to bring this program together

    Opens the file named by the last command-line argument, runs the
    leave-one-out validation parsing/reporting, and writes the output file.
    Files are now closed in ``finally`` blocks so they are not leaked when
    any intermediate step raises (the original only closed them on success).
    '''
    filename = sys.argv[-1]
    fin = IOfile.finOpen(filename)
    try:
        data = parse.parseLists(fin)  # Results from leave-one-out validation
        IOfile.screenOut(filename, data)
        fout = IOfile.foutOpen(filename)
        try:
            IOfile.cvOut(filename, fout, data)
        finally:
            fout.close()
    finally:
        fin.close()
示例#5
0
import os

import IOfile
from TFCalculator import TFCalculator as TFC
import TFDisplayTools
from TSCalculator import TSCalculator as TSC

# single layer test case

# filenames (note: ``os`` was used below but never imported -- fixed)
basedir = 'Example/Input'
fname = os.path.join(basedir,'sampleinput_linear_elastic_1layer_halfspace.dat')
fname2 = os.path.join(basedir,'sampleinput_psv_s_linear_elastic_1layer_halfspace.dat')
fname3 = os.path.join(basedir,'sampleinput_psv_p_linear_elastic_1layer_halfspace.dat')
fname4 = os.path.join(basedir,'GoverGmax.dat')

# input file reading
datash = IOfile.parsing_input_file(fname)
datapsvs = IOfile.parsing_input_file(fname2)
# datapsvp = IOfile.parsing_input_file(fname3)
datanonlin = IOfile.parsing_nonlinear_parameter(fname4,True)

def modnlayer(data,nlayer):
    nl = nlayer
    data['sourceloc']=nl
    data['nlayer']=nl+1
    data['tfPair'][0][1]=nl
    newhl = []; newvs = []; newdn = []; newqs = []; newvp = []; newqp = []
    newsoiltype = []
    for j in range(nl):
        newhl.append(data['hl'][0]/nl)
        newvs.append(data['vs'][0])
        newqs.append(data['qs'][0])
@author: irnakat
"""

# test IOfile
import IOfile
from TFCalculator import TFCalculator as TFC
import TFDisplayTools

# validity test for SH PSV case using S wave as an input

# filenames
fname = 'sampleinput_linear_elastic_1layer_halfspace.dat'
fname2 = 'sampleinput_psv_s_linear_elastic_1layer_halfspace.dat'

# input file reading
datash = IOfile.parsing_input_file(fname)
datapsvs = IOfile.parsing_input_file(fname2)

# kramer (typo 'calculatoin' fixed; print() works on both Python 2 and 3)
print('TF calculation using kramer approach')
theclass1 = TFC(datash)
theclass1.tf_kramer286_sh() # check/verify kramer calculation
print('calculation has been finished!')

# knopoff sh complete
print('TF calculation using complete knopoff sh approach')
theclass3 = TFC(datash)
theclass3.tf_knopoff_sh_adv()
print('calculation has been finished!')
# knopoff psv-s
示例#7
0
def sensitivityTools(fname,sensitivity='incidence angle',senslinspace=[0.,90.,91],method='tf_kramer286_sh',yscale='lin'):
    """Sweep one model parameter and plot the transfer-function sensitivity.

    Parameters
    ----------
    fname : str
        Input file parsed by ``IOfile.parsing_input_file``.
    sensitivity : str
        What to sweep: 'incidence angle', 'incidence angle phase',
        'incidence angle vectorial', 'thickness', or ``'<par> <layer>'``
        where <par> is one of vp/vs/qp/qs/dn and <layer> is 1-based.
    senslinspace : sequence of [start, stop, num]
        Sweep range; spaced logarithmically when ``yscale == 'log'``.
    method : str
        Name of the TFCalculator method to run for each sweep value.
        (Dispatched with ``getattr`` -- the original used ``eval``, which is
        an arbitrary-code-execution hazard and no faster.)
    yscale : str
        'lin' or 'log'; controls sweep spacing and the spectrogram y scale.

    Raises
    ------
    ValueError
        For an unrecognized *sensitivity* (the original deferred this to a
        confusing NameError at plot time).
    """
    import IOfile
    import numpy as np
    import pylab as plt
    from TFCalculator import TFCalculator as TFC
    from TFDisplayTools import SpectroPlot

    # timing: time.clock() was removed in Python 3.8 -- prefer perf_counter
    import time
    timer = getattr(time, 'perf_counter', None) or time.clock

    data = IOfile.parsing_input_file(fname)
    data['sensitivity'] = True
    elapsed = []
    xs = []; ys = []; zs = []
    if yscale == 'log':
        ianglist = np.logspace(np.log10(senslinspace[0]),np.log10(senslinspace[1]),senslinspace[2])
    else:
        ianglist = np.linspace(senslinspace[0],senslinspace[1],senslinspace[2])

    def _sweep(setter, extract):
        """Run one TF calculation per sweep value, accumulating x/y/z columns."""
        for val in ianglist:
            setter(val)
            theclass = TFC(data)
            start = timer()
            tf = getattr(theclass, method)()
            elapsed.append(timer() - start)
            # shift frequencies by one bin so a log axis never sees f=0
            xs.append(theclass.freq + theclass.freq[1])
            zs.append(extract(tf))
            ys.append(np.zeros_like(theclass.freq) + val)

    if sensitivity == 'incidence angle':
        _sweep(lambda v: data.update(iang=np.deg2rad(v)),
               lambda tf: np.abs(tf[0]))
        ylabel = 'Incidence angle'
        data['iang'] = ianglist
    elif sensitivity == 'incidence angle phase':
        _sweep(lambda v: data.update(iang=np.deg2rad(v)),
               lambda tf: np.rad2deg(np.angle(tf[0])))
        ylabel = 'Incidence angle'
        data['iang'] = ianglist
    elif sensitivity == 'incidence angle vectorial':
        # vectorial amplitude combines both components
        _sweep(lambda v: data.update(iang=np.deg2rad(v)),
               lambda tf: np.sqrt(np.abs(tf[0])**2 + np.abs(tf[1])**2))
        ylabel = 'Incidence angle'
        data['iang'] = ianglist
    elif sensitivity == 'thickness':
        _sweep(lambda v: data.update(hl=[v, 0.]),
               lambda tf: np.abs(tf[0]))
        ylabel = 'Thickness (m)'
        data['hl'][0] = ianglist.tolist()
    elif sensitivity[:2] in ('vp', 'vs', 'qp', 'qs', 'dn'):
        # '<par> <layer>' form, e.g. 'vs 1' -- sweep one layer's property.
        # The original duplicated this whole loop five times.
        par = sensitivity[:2]
        layer = int(sensitivity.split()[1]) - 1
        labels = {'vp': 'Vp (m/s)', 'vs': 'Vs (m/s)',
                  'qp': '$Q_{P}$', 'qs': '$Q_{S}$',
                  'dn': '$\\rho (kg/m^3)$'}
        # P-wave parameters use the P component of the transfer function
        comp = 1 if par in ('vp', 'qp') else 0

        def setter(v):
            data[par][layer] = v
            if par in ('vp', 'qp'):
                data['comp'] = 'p'

        _sweep(setter, lambda tf: np.abs(tf[comp]))
        ylabel = labels[par]
        data[par][layer] = ianglist.tolist()
    else:
        raise ValueError('unknown sensitivity type: %r' % (sensitivity,))

    data['x'] = np.concatenate(xs) if xs else np.array([])
    data['y'] = np.concatenate(ys) if ys else np.array([])
    data['z'] = np.concatenate(zs) if zs else np.array([])

    print('method : %s; average elapsed time : %.6f with standard deviation : %.6f'%(method,np.mean(elapsed),np.std(elapsed)))
    if yscale=='log':
        SpectroPlot(data,nx=100,ny=100,ylabel=ylabel,cmap='rainbow',yscale='log')
    else:
        SpectroPlot(data,nx=100,ny=100,ylabel=ylabel,cmap='rainbow')
示例#8
0
    def linear_equivalent_TF2TS(self,sublayercriteria = 5.,numiter = 10,conv_level = 0.01,verbose=False):
        """
        Calculation of linear equivalent method for transfer function

        Splits thick layers into sublayers, then iterates the linear
        TF-to-TS calculation while updating each sublayer's Vs (shear
        modulus) and Qs (damping) from the G/Gmax and damping-ratio curves,
        until the relative change of both G and D drops below ``conv_level``
        or ``numiter`` iterations have run.

        Parameters
        ----------
        sublayercriteria : float
            Maximum allowed sublayer thickness (m); thicker layers are split.
        numiter : int
            Maximum number of equivalent-linear iterations.
        conv_level : float
            Relative-change threshold on G and D for convergence.
        verbose : bool
            When True, print the layer model and convergence per iteration.
        """
        # set up linear equivalent parameters
        #sublayercriteria = 5.   # maximum thickness of layer
        #numiter = 10            # maximum number of iteration
        #conv_level = 0.01       # convergence limit
        
        # read G/Gmax and Damping Ratio
        # NOTE(review): bare ``except:`` masks the real failure (file missing,
        # parse error, ...) behind a KeyError; also 'GoveGmaxfile' in the
        # message is a typo for 'GoverGmaxfile' -- consider fixing both.
        try:
            self.nonlinpar = IOfile.parsing_nonlinear_parameter(self.parameters['GoverGmaxfile'][0])
        except:
            raise KeyError('GoveGmaxfile is not detected! Unable to run linear equivalent calculation!')
        
        # perform sublayer addition: each layer thicker than
        # ``sublayercriteria`` is split into equal sublayers; ``nli`` records
        # how many sublayers each original layer produced so that layer
        # indices (tfPair, sourceloc) can be remapped afterwards.
        newhl = []; newvs = []; newqs = []; newvp = []; newqp = []; newdn = []; newst = []; nli = []
        for i in range(len(self.parameters['hl'])-1):
            if self.parameters['hl'][i]>sublayercriteria:
                nlayer = np.ceil(self.parameters['hl'][i]/sublayercriteria)
                newhl = np.concatenate((newhl,[self.parameters['hl'][i]/nlayer for j in range(int(nlayer))]))
                newvs = np.concatenate((newvs,[self.parameters['vs'][i] for j in range(int(nlayer))]))
                newqs = np.concatenate((newqs,[self.parameters['qs'][i] for j in range(int(nlayer))]))
                newdn = np.concatenate((newdn,[self.parameters['dn'][i] for j in range(int(nlayer))]))
                newst = np.concatenate((newst,[self.parameters['soiltype'][i] for j in range(int(nlayer))]))
                nli.append(nlayer)
                # modeID 12 apparently carries P-wave parameters too -- vp/qp
                # are only tracked in that mode
                if self.parameters['modeID']==12:
                    newvp = np.concatenate((newvp,[self.parameters['vp'][i] for j in range(int(nlayer))]))
                    newqp = np.concatenate((newqp,[self.parameters['qp'][i] for j in range(int(nlayer))]))
            else:
                # thin layer: kept as-is (one sublayer)
                newhl = np.concatenate((newhl,[self.parameters['hl'][i]]))
                newvs = np.concatenate((newvs,[self.parameters['vs'][i]]))
                newqs = np.concatenate((newqs,[self.parameters['qs'][i]]))
                newdn = np.concatenate((newdn,[self.parameters['dn'][i]]))
                nli.append(1.)
                newst = np.concatenate((newst,[self.parameters['soiltype'][i]]))
                if self.parameters['modeID']==12:
                    newvp = np.concatenate((newvp,[self.parameters['vp'][i]]))
                    newqp = np.concatenate((newqp,[self.parameters['qp'][i]]))
        # for the last layer (half-space: zero thickness, never split)
        newhl = np.concatenate((newhl,[0]))
        newvs = np.concatenate((newvs,[self.parameters['vs'][-1]]))
        newqs = np.concatenate((newqs,[self.parameters['qs'][-1]]))
        newdn = np.concatenate((newdn,[self.parameters['dn'][-1]]))
        newst = np.concatenate((newst,[self.parameters['soiltype'][-1]]))
        nli.append(1.)
        if self.parameters['modeID']==12:
            newvp = np.concatenate((newvp,[self.parameters['vp'][-1]]))
            newqp = np.concatenate((newqp,[self.parameters['qp'][-1]]))
        # assign sublayer model back onto the shared parameter dict
        # (NOTE: mutates self.parameters in place)
        self.parameters['nlayer']=len(newhl)
        self.parameters['hl']=newhl.tolist()
        self.parameters['vs']=newvs.tolist()
        self.parameters['qs']=newqs.tolist()
        self.parameters['dn']=newdn.tolist()
        self.parameters['soiltype']=newst.tolist()
        if self.parameters['modeID']==12:
            self.parameters['vp']=newvp.tolist()
            self.parameters['qp']=newqp.tolist()
        # correction of tfpair: remap original layer indices to sublayer
        # indices (cumulative sublayer counts); oldpair is mutated in place
        # and restored at the end of this method
        oldpair = self.parameters['tfPair']  
        for i in range(len(oldpair)):
            oldpair[i][0] = int(np.sum(nli[:oldpair[i][0]]))
            oldpair[i][1] = int(np.sum(nli[:oldpair[i][1]]))
        # correction of source location (same remapping)
        self.parameters['sourceloc']=int(np.sum(nli[:self.parameters['sourceloc']]))
        # modification of tfpair: during iteration, compute a TF for every
        # adjacent sublayer pair so strains are available per sublayer
        newpair = [[i,i+1] for i in range(self.parameters['nlayer']-1)]
        self.parameters['tfPair'] = newpair
        self.parameters['ntf'] = len(newpair)
        
        # initial calculation G/Gmax = 1, damping ratio = 1st value
        # Gmax = rho * Vs^2 per sublayer (small-strain shear modulus)
        Gmax = [self.parameters['dn'][i]*self.parameters['vs'][i]**2 for i in range(len(newpair))]
        Gerr = np.zeros((len(newpair)))
        Derr = np.zeros((len(newpair)))
        
        # print original model information
        if verbose:
            print 'iteration %3d'%0
            print 'Thickness\tVs\t Qs\t convG\t convD'
            for i in range(len(newpair)):
                print '%.2f\t\t%.2f\t%.2f\tn/a\tn/a'%(self.parameters['hl'][i],self.parameters['vs'][i],self.parameters['qs'][i])        
        
        # equivalent-linear iteration loop
        for j in range(numiter):     
            if verbose:
                print 'iteration %3d'%(j+1)
                print 'Thickness\tVs\t Qs\t convG\t convD'
            self.linear_TF2TS(parameters=self.parameters)
            # check non linearity and update parameter
            for i in range(len(newpair)):
                # retrieve old G and D (D = 1/(2Q))
                oldG = self.parameters['dn'][i]*self.parameters['vs'][i]**2                
                oldD = 1./(2.*self.parameters['qs'][i])
                # update G and D parameter: look up the nonlinear curves at
                # the peak strain of this sublayer's time series
                # (self.time_series[i*2] -- presumably the strain channel of
                # each pair; TODO confirm against linear_TF2TS's layout)
                if int(self.parameters['soiltype'][i])>=0:
                    val,idx =  self.find_nearest(self.nonlinpar['nonlin strain'][int(self.parameters['soiltype'][i])-1],np.max(self.time_series[i*2]))
                    self.parameters['vs'][i]=np.sqrt((Gmax[i]*self.nonlinpar['nonlin G/Gmax'][int(self.parameters['soiltype'][i])-1][idx])/self.parameters['dn'][i])
                    self.parameters['qs'][i]=1./(2.*self.nonlinpar['nonlin damping'][int(self.parameters['soiltype'][i]-1)][idx]/100.)
                # retrieve new G and D
                newG = self.parameters['dn'][i]*self.parameters['vs'][i]**2
                newD = 1./(2.*self.parameters['qs'][i])
                # calculate rate of change as the convergence level
                Gerr[i] = np.abs(newG-oldG)/oldG
                Derr[i] = np.abs(newD-oldD)/oldD
                if verbose:
                    print '%.2f\t\t%.2f\t%.2f\t%.4f\t%.4f'%(self.parameters['hl'][i],self.parameters['vs'][i],self.parameters['qs'][i],Gerr[i],Derr[i])
            if np.max(Gerr)<=conv_level and np.max(Derr)<=conv_level:
                if verbose:
                    print('convergence has been reached! Calculation is stopped!')
                break
            
        # number of iterations actually performed (1-based)
        self.lastiter = deepcopy(j)+1

        # correction of tfpair: restore the caller's (remapped) pairs and
        # run the final linear calculation with the converged model
        self.parameters['tfPair']=oldpair
        self.parameters['ntf']=len(oldpair)
        self.linear_TF2TS(parameters=self.parameters)
示例#9
0
    def __init__(self,parfile,method='auto',sublayercriteria = 5.,numiter = 10,conv_level = 0.01,verbose=False):
        """Build a time-series calculator from a parameter file.

        Reads the parameter file, loads the ascii input motion, derives the
        sampling quantities, picks a transfer-function method (automatically
        when ``method == 'auto'``), and runs either the linear or the
        equivalent-linear TF-to-TS calculation depending on ``modeID``.

        Parameters
        ----------
        parfile : str
            Path to the input parameter file (parsed by IOfile).
        method : str
            TF method name, or 'auto' to select one from the model type,
            layer count and incidence angle.
        sublayercriteria, numiter, conv_level, verbose :
            Forwarded to ``linear_equivalent_TF2TS`` when modeID is 11 or 12.

        Raises
        ------
        KeyError
            If the input motion is not in ascii format.
        """
        # parameter file initialization
        self.parfile = parfile
        # read file parameter
        self.parameters = IOfile.parsing_input_file(parfile)
        # method is automatically defined
#        if method=='auto':
#            if self.parameters['type']=='PSV':
#                self.method = 'knopoff_psv_adv'
#            else:
#                if self.parameters['nlayer']<=5:
#                    if self.parameters['iang']==0.:
#                        self.method='knopoff_sh'
#                    else:
#                        self.method='knopoff_sh_adv'
#                else:
#                    self.method = 'kennet_sh'
        # checking input file
        if self.parameters['inputmotion'][1]=='ascii':
            self.inp_time,self.inp_signal = IOfile.read_ascii_seismogram(self.parameters['inputmotion'][0])
        else:
            raise KeyError('Input motion other than ascii format is not yet supported! Please convert it to displacement on another software first!')
        # scale amplitudes by 1/100 -- presumably a cm -> m unit conversion;
        # TODO confirm against the expected input-file units
        self.inp_signal = [i/100. for i in self.inp_signal]
        # sampling frequency, time step and frequency resolution of the input
        self.fs = 1/(self.inp_time[1]-self.inp_time[0])
        self.dt = self.inp_time[1]-self.inp_time[0]
        self.df = 1./((len(self.inp_signal)-1)*self.dt)
        
        # baseline correction for input signal
        # NOTE(review): list minus numpy scalar relies on numpy broadcasting;
        # inp_signal is an ndarray from this point on
        self.inp_signal = self.inp_signal-np.mean(self.inp_signal)        
        
#        if self.parameters['inputmotion'][2]=='vel':
#            self.inp_signal = GT.vel2disp(self.inp_signal,self.dt)
#            self.inp_signal = self.cosine_tapering(self.inp_signal)
#            self.inp_signal = self.butter_highpass_filter(self.inp_signal,2.*self.df,self.fs)
#        elif self.parameters['inputmotion'][2]=='acc':
#            self.inp_signal = GT.acc2disp(self.inp_signal,self.dt)
#            self.inp_signal = self.cosine_tapering(self.inp_signal)
#            self.inp_signal = self.butter_highpass_filter(self.inp_signal,2.*self.df,self.fs)
            
        # modeID 11/12: equivalent-linear calculation; anything else: linear
        if self.parameters['modeID']==11 or self.parameters['modeID']==12:
            # method is automatically defined
            if method=='auto':
                if self.parameters['type']=='PSV':
                    self.method = 'knopoff_psv_adv'
                else:
                    # vertical incidence -> kramer; oblique -> knopoff advanced
                    if self.parameters['iang']==0.:
                        self.method = 'kramer286_sh'
                    else:
                        self.method = 'knopoff_sh_adv'
            else:
                self.method = method
            self.linear_equivalent_TF2TS(sublayercriteria,numiter,conv_level,verbose)
        else:
            # method is automatically defined
            # (note: unlike the branch above, vertical incidence selects
            # 'knopoff_sh' here rather than 'kramer286_sh')
            if method=='auto':
                if self.parameters['type']=='PSV':
                    self.method = 'knopoff_psv_adv'
                else:
                    if self.parameters['iang']==0.:
                        self.method = 'knopoff_sh'
                    else:
                        self.method = 'knopoff_sh_adv'
            else:
                self.method = method
            self.linear_TF2TS()
            # single pass -- no iteration in the purely linear case
            self.lastiter = 1
        
        # convert the displacement results to the caller's requested quantity
        if self.parameters['inputmotion'][2]=='acc':
            for i in range(len(self.time_series)):
                self.time_series[i] = GT.disp2acc(self.time_series[i],self.dt)
        elif self.parameters['inputmotion'][2]=='vel':
            for i in range(len(self.time_series)):
                self.time_series[i] = GT.disp2vel(self.time_series[i],self.dt)