def IDA_postprocess(IM, EDP, limits, bUthd, rec_data, g):
    """Post-process IDA results into exceedance probabilities and fragility fits.

    Parameters
    ----------
    IM : intensity-measure levels, forwarded unchanged to ``mle``.
    EDP : iterable of per-stripe EDP arrays (one numpy array per stripe).
    limits : damage-threshold values on the EDP, one per limit state.
    bUthd : per-limit-state threshold dispersion; a value > 0 smears the
        threshold with a lognormal, otherwise a plain counting estimator
        is used.
    rec_data : pair (Nstripe, Nrec) — number of stripes and of records
        per stripe.
    g : gravity constant, forwarded unchanged to ``mle``.

    Returns
    -------
    list
        ``[log_meanSa, log_stSa, Pex]`` — per-limit-state log-medians and
        dispersions from the MLE fit, and the (len(limits), Nstripe)
        matrix of exceedance probabilities.
    """
    Nstripe, Nrec = rec_data
    Pex = []
    for i in range(len(limits)):
        log_limit = np.log(limits[i])  # hoisted: invariant over stripes
        for edp in EDP:
            if bUthd[i] > 0:
                # Uncertain threshold: P(exceedance) of each record against a
                # lognormal threshold, averaged over the stripe's records.
                allPex = stat.norm(log_limit, bUthd[i]).cdf(np.log(edp))
                Pex.append(allPex.sum() / Nrec)
            else:
                # Deterministic threshold: fraction of records exceeding it.
                Pex.append(np.count_nonzero(np.log(edp) > log_limit) / float(Nrec))
    Pex = np.array(Pex).reshape((len(limits), Nstripe))

    # Fit a lognormal fragility per limit state via maximum likelihood.
    log_meanSa, log_stSa = [], []
    for row in Pex:
        data = row * Nrec  # expected count of exceeding records per stripe
        mu, sigma = mle(data.astype(int), IM, Nrec, g)
        log_meanSa.append(mu)
        log_stSa.append(sigma)
        print(np.exp(mu), sigma)
    print('medians=', [np.exp(ele) for ele in log_meanSa])
    print('total dispersion=', log_stSa)
    return [log_meanSa, log_stSa, Pex]
def IDA_postprocess(IM, EDP, limits, bUthd, rec_data, g): Pex = [] [Nstripe,Nrec] = rec_data; for i in range(0,len(limits)): for edp in EDP: if bUthd[i]>0: allPex = stat.norm(np.log(limits[i]),bUthd[i]).cdf(np.log(edp)) Pex.append(allPex.sum()/Nrec) else: Pex.append(np.divide(float(len(edp[np.log(edp)>np.log(limits[i])])),float(Nrec))) Pex = np.array(Pex) Pex = Pex.reshape((len(limits),Nstripe)) # <codecell> log_meanSa, log_stSa = [], [] for i in range(0,len(Pex)): data = Pex[i]*Nrec [mu, sigma] = mle(data.astype(int), IM, Nrec,g) log_meanSa.append(mu) log_stSa.append(sigma) print np.exp(mu), sigma print 'medians=', [np.exp(ele) for ele in log_meanSa] print 'total dispersion=', log_stSa return [log_meanSa, log_stSa, Pex]
def fragility_process(dcm, totblg, im, noLS, g):
    """Derive lognormal fragility parameters from a damage-count matrix.

    Parameters
    ----------
    dcm : damage count matrix, converted to probabilities of exceedance
        by ``count_to_poe``.
    totblg : total number of buildings in the sample.
    im : intensity-measure levels (column vector), in any order.
    noLS : number of limit states (kept for interface compatibility; the
        number of fitted states is taken from the fragility matrix shape).
    g : gravity constant, forwarded unchanged to ``mle``.

    Returns
    -------
    list
        ``[log_meanSa, log_stSa, FR, IML]`` — per-limit-state log-medians
        and dispersions, plus the fragility points and IMLs sorted by
        increasing intensity.
    """
    [fr] = count_to_poe(dcm, totblg)

    # Sort fragility points and intensity levels by increasing IM.
    # NOTE(review): the original also rebound ``dcm`` to an uninitialised
    # ``np.matrix(np.empty(...))`` here; that value was never read, so the
    # dead assignment has been removed.
    I = np.argsort(im, axis=0)
    FR = fr[I, :]
    IM = im[I, :]

    # IML is loop-invariant — build it once instead of once per limit state.
    IML = [IM[i, 0, 0] for i in range(len(IM))]

    log_meanSa, log_stSa = [], []
    # Fit the count of buildings exceeding each damage state with MLE
    # (state 0 is "no damage", hence the range starts at 1).
    for j in range(1, FR.shape[2]):
        damage = FR[:, 0, j] * totblg  # number of buildings exceeding state j
        num_collapse = damage.astype(int).transpose().tolist()
        mu, sigma = mle(num_collapse, IML, totblg, g)
        log_meanSa.append(mu)
        log_stSa.append(sigma)
        print("LS", j)
        print("theta=", np.exp(mu))
        print("beta=", sigma)
    return [log_meanSa, log_stSa, FR, IML]
def fragility_process(dcm,totblg,im,noLS, g): [fr] = count_to_poe(dcm,totblg) # Export fragility points to csv #cd = os.getcwd() #output_file = cd+'/outputs/cumulative_damage_matrix.csv' #header = ['IML', 'no.blgs', 'Damage States'] #dat1 = np.zeros(len(fr)) #dat3 = [] #for j in range(0, len(fr)): # dat1[j] = im.tolist()[j][0] # dat3.append([fr[j,:].tolist()[n] for n in range(0,noLS+1)]) # I don't know how to print fr in separeted columns #col_data = [dat1, [totblg]*len(dat1), dat3] #n_lines = len(fr) #print_outputs(output_file,header,n_lines,col_data) #Sort the matrices according to Intensity Measure Levels dcm = np.matrix(np.empty(fr.shape)) I = np.argsort(im, axis=0) FR = fr[I,:] IM = im[I,:] log_meanSa, log_stSa = [],[] # Fit number of buildings exceeding each level of damage with Maximum Likelihood regression method for j in range(1,FR.shape[2]): damage = FR[:,0,j]*totblg #number of buildings exceeding damage j dint = damage.astype(int) nc = dint.transpose() num_collapse = nc.tolist() IML = [IM[i,0,0] for i in range(0,len(IM))] [mu, sigma]=mle(num_collapse, IML, totblg, g) log_meanSa.append(mu) log_stSa.append(sigma) print "LS", j print "theta=",np.exp(mu) print "beta=",sigma return [log_meanSa, log_stSa, FR, IML]
# -*- coding: utf-8 -*-
"""
Created on Thu May  8 18:18:30 2014

Standalone check of the maximum-likelihood fragility fit: feeds a fixed
set of observed collapse fractions and intensity-measure levels to
``mle`` and prints the fitted lognormal parameters.

@author: chiaracasotto
"""
import numpy as np
from rmtk.vulnerability.common.mle import mle

# INPUTS
totblg = 100  # total number of buildings in the sample
g = 981       # gravity, cm/s^2 (the IMLs below use the same unit system)

# Observed collapse fractions at each IML, scaled to building counts.
# (Fixes the original local-variable typo "num_collaspe".)
collapse_counts = np.array([0.54, 0.31, 0.31, 0.5, 0.37, 0, 0.46, 0.58,
                            1, 0.95, 1, 0.88, 0.72, 0.08, 0.65, 0.43,
                            0.16, 0.59, 0.41, 0.6, 0]) * totblg
IML = [49.852, 47.056, 33.012, 82.125, 37.499, 3.159, 52.205, 88.363,
       290.11, 241.78, 273.37, 151.24, 92.957, 40.101, 97.535, 54.723,
       26.081, 105.61, 156.81, 82.401, 26.807]

num_collapse = collapse_counts.astype(int)
[mu, sigma] = mle(num_collapse.tolist(), IML, totblg, g)
print(mu, sigma)
* totblg ) IML = [ 49.852, 47.056, 33.012, 82.125, 37.499, 3.159, 52.205, 88.363, 290.11, 241.78, 273.37, 151.24, 92.957, 40.101, 97.535, 54.723, 26.081, 105.61, 156.81, 82.401, 26.807, ] num_collapse = num_collaspe.astype(int) [mu, sigma] = mle(num_collapse.tolist(), IML, totblg, g) print mu, sigma