import numpy as np
from numpy.linalg import inv as invert  # assumption: invert is a plain matrix inverse

# T_res, sigma, S_0 and zenit_value are module-level constants/helpers defined elsewhere.
def run(max_t, delta_t, max_z, delta_z, latitude, medium, methode):
    # define variables to make the code clearer
    number_t = int(max_t / delta_t)
    number_z = int(max_z / delta_z)
    r = medium.lam * (delta_t * 24 * 3600) / (medium.c_p * medium.rho * delta_z**2)

    # solution grid
    T = np.zeros((number_t, number_z + 1))
    T[0][:] = T_res

    # import the matching matrices A, B and vector C
    A = methode.A(number_z + 1, r)
    invB = invert(methode.B(number_z + 1, r))  # invert B once, reuse every step
    C = methode.C(number_z + 1, r, T_res)

    # solve the linear equation system for each time step
    for i in np.arange(number_t - 1):
        # apply the radiative surface boundary condition for each time step
        C[0] = -delta_t * 24 * 3600 / (medium.c_p * medium.rho * delta_z) * (
            -medium.epsilon * sigma * T[i][0]**4
            + (1 - medium.albedo) * S_0
            * np.cos(zenit_value(i * delta_t, latitude) / 360 * 2 * np.pi))
        T[i + 1][:] = np.dot(invB, np.dot(A, T[i][:]) - C)

    # transpose the solution grid and delete the T_loss line
    T = np.delete(T.transpose(), number_z, 0)
    return T
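# A minimal usage sketch for run(): medium must expose lam, c_p, rho, epsilon
# and albedo; methode must provide the scheme's matrix builders A, B and the
# boundary vector C. Everything below is a hypothetical placeholder, not the
# project's actual configuration.
import numpy as np

class Medium:
    lam = 2.0       # thermal conductivity [W/(m K)] - placeholder
    c_p = 800.0     # specific heat capacity [J/(kg K)] - placeholder
    rho = 2000.0    # density [kg/m^3] - placeholder
    epsilon = 0.9   # surface emissivity - placeholder
    albedo = 0.3    # surface albedo - placeholder

class Scheme:
    # A(n, r) and B(n, r) return the n x n update matrices of the chosen
    # finite-difference scheme; C(n, r, T_res) returns the boundary vector.
    # Bodies are omitted here.
    @staticmethod
    def A(n, r): ...
    @staticmethod
    def B(n, r): ...
    @staticmethod
    def C(n, r, T_res): ...

# T = run(max_t=10, delta_t=0.01, max_z=1.0, delta_z=0.05,
#         latitude=48.0, medium=Medium(), methode=Scheme())
# T then has shape (number_z, number_t): rows are depth levels, columns time steps.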
def main(data, vel_locs, inv_w, inv_beta, eps_w, eps_beta):
    # the default examples use synthetic data (see synthetic_data.py)

    # solve the inverse problem
    sol, fwd = invert(data, vel_locs, inv_w, inv_beta, eps_w, eps_beta)

    # compute the normalized elevation misfit
    mis = norm(fwd - data[0]) / norm(data[0])

    return sol, fwd, mis
      sep='\n ', file=open('TrainingResult.txt', 'w'))

y_pred = regressor.predict(X_test)
plt.plot(X_test, y_test, 'o', color='orange')
plt.plot(X_test, y_pred, 'o', color='blue')
plt.savefig('./plot.pdf')
plt.show()

inversionResults = pd.DataFrame(columns=['accuracy_percent'])
for i, value in enumerate(y_test):
    desired_output = [value]
    guessedInput = inversion.invert(regressor, desired_output,
                                    pd.DataFrame(X_test).columns.size,
                                    gs.best_params_['learning_rate_init'])
    guessedInput = pd.DataFrame(guessedInput).T
    accuracy = 1 - abs((regressor.predict(guessedInput) - desired_output)
                       / (y_test.max() - y_test.min()))
    inversionResults = inversionResults.append(
        {'accuracy_percent': accuracy[0] * 100}, ignore_index=True)
    print('guessed input vector in X_test: ', np.array(guessedInput),
          'predicted output vector for y_test: ', regressor.predict(guessedInput),
          'desired output value in y_test: ', desired_output,
          'error: ',
def setup_run_inversion(home, dbpath, dbname, ncoeff, rng, sdist, Mc, smth,
                        vref, mdep_ffdf, predictive_parameter='pga',
                        data_correct=-0.6):
    '''
    Make the necessary matrices, invert, and save the model output.
    Input:
        home:                  String with project home (i.e., anza)
        dbpath:                String with the database path
        dbname:                String with the database name (i.e., db2013 for pckl/db2013/)
        ncoeff:                Number of coefficients used in the inversion
        rng:                   Array of ranges used to constrain the inversion
        sdist:                 Distances to include in smoothing - this many extra
                               equations are added at each range boundary
        Mc:                    M-squared centering term (8.5 in ASK2014)
        smth:                  Smoothing factor
        vref:                  Reference vs30 value (e.g., 760 m/s)
        mdep_ffdf:             Flag for magnitude-dependent ffdf (0/1 = no/yes)
        predictive_parameter:  Default is 'pga'; otherwise 'pgv', etc.
        data_correct:          Coefficient used to correct the data by a vs30 term
                               (default: -0.6)
    Output:
        invdat:                Inversion object, stored in the model path and named
                               after the ranges etc. used in the inversion
    '''
    import cPickle as pickle
    import cdefs as cdf
    import inversion as inv
    import numpy as np

    # Get directories for things:
    obj_dir = home + '/models/pckl/' + dbname + '/'

    # Open the database object:
    dbfile = open(dbpath, 'r')
    db = pickle.load(dbfile)
    dbfile.close()

    ### DEBUGGING...
    print 'ncoeff is %i, predictive parameter is %s, and data_correct is %i' % (ncoeff, predictive_parameter, data_correct)
    print 'data correct is: %f' % data_correct
    print 'smth is %i, vref is %i' % (smth, vref)

    # Invert:
    # Make matrices
    G, d = inv.iinit_predparam(db, ncoeff, rng, sdist, Mc, smth, vref, mdep_ffdf,
                               predictive_parameter=predictive_parameter,
                               data_correct=data_correct)
    # Invert
    m, resid, L2norm, VR, rank, svals = inv.invert(G, d)

    print 'Inversion residual is: '
    print G.dot(m) - d
    print 'Mean of inversion residual is: '
    print np.mean(G.dot(m) - d)

    # Get the string for the filename, based on the ranges:
    for k in range(len(rng)):
        if k == 0:
            strname = str(rng[k])
        else:
            strname = strname + '_' + str(rng[k])
    basename = ('regr_' + predictive_parameter + '_Mc' + str(Mc) + '_' + strname
                + '_VR_' + str(np.around(VR, decimals=1)))

    # This is a normal inversion, so set stderror and tvalue to NaN, since they do not apply:
    stderror = float('NaN')
    tvalue = float('NaN')

    #### DEBUGGING....
    print '\n ln of the data, pga, are: \n'
    print np.log(db.pga_pg)
    print '\n the model, what goes into model.d, is: \n'
    print d

    # Put everything into an inversion object and pickle it:
    invdat = cdf.invinfo(G, d, m, resid, L2norm, VR, rank, svals, rng, sdist,
                         smth, stderror, tvalue)
    fname = obj_dir + basename + '.pckl'
    datobjfile = open(fname, 'w')
    pickle.dump(invdat, datobjfile)
    datobjfile.close()

    # Now, also print the model coefficients to a file:
    model_coeff_file = obj_dir + 'coeff_' + basename + '.txt'
    coeff = open(model_coeff_file, 'w')
    coeff.write('Model Coefficients for inversion ' + basename + ':')
    for coeff_i in range(ncoeff):
        coeff.write('\n a' + str(coeff_i + 1) + ' = ' + str(m[coeff_i]))
    coeff.close()

    # Return the model info:
    return invdat
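# A hypothetical driver call for setup_run_inversion, sketched with placeholder
# paths and an illustrative range array: home, dbpath, ncoeff and the rng values
# are assumptions, while sdist, smth and vref mirror values that appear elsewhere
# in this project.
import numpy as np

invdat = setup_run_inversion(
    home='/path/to/anza',                       # placeholder project home
    dbpath='/path/to/anza/data/db2013.pckl',    # placeholder database pickle
    dbname='db2013',
    ncoeff=5,                                   # assumption: 5 model coefficients
    rng=np.array([0., 1., 2., 3.3, 4.5, 6.5]),  # placeholder range boundaries
    sdist=np.array([1, 5, 10, 15, 20]),
    Mc=8.5,
    smth=500,
    vref=760,
    mdep_ffdf=0,
    predictive_parameter='pga',
    data_correct=-0.6)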
# Number of distances to include in smoothing - there will be this many extra
# equations added on at each range boundary
sdist = np.array([1, 5, 10, 15, 20])

# Smoothing factor
smth = 500

# Use the magnitude-dependent fictitious depth/finite fault dimension factor?
# no == 0, yes == 1
mdep_ffdf = 0

# Invert:
# Make matrices
G, d = inv.iinit_pga(abdb, ncoeff, rng, sdist, smth, mdep_ffdf)
# Invert
m, resid, L2norm, VR, rank, svals = inv.invert(G, d)

# Compute the predicted value (from the GMPE) at each data point
vref = 760
# Magnitude-dependent fictitious depth?
if mdep_ffdf == 0:
    d_predicted = gm.compute_model(m, rng, abdb.mw, abdb.r, abdb.ffdf,
                                   abdb.vs30, vref, mdep_ffdf)
elif mdep_ffdf == 1:
    d_predicted = gm.compute_model(m, rng, abdb.mw, abdb.r, abdb.md_ffdf,
                                   abdb.vs30, vref, mdep_ffdf)

# Compute the magnitude/log10(pga) for each distance, to plot on top of the data:
mw_model, d_model = gm.compute_model_fixeddist(m, rng, sdist, mdep_ffdf)

# Get the NGA predictions to plot on the same figure:
# Coefficient file:
                  batchSize))
clients.append(
    client.Client('mnist/mnist_unif_bad_4_9', epsilon, numClasses,
                  numFeatures, batchSize))
clients.append(
    client.Client('mnist/mnist_unif_bad_4_9', epsilon, numClasses,
                  numFeatures, batchSize))

# Train for ITER iterations
for i in xrange(ITER):
    print("iteration: " + str(i))

    # All clients submit gradients
    # (loop variable renamed to avoid shadowing the client module)
    for c in clients:
        c.submitGradient(models)

    # All model branches update
    for branch in models:
        branch.updateModel()

    # Check test accuracy
    for c in clients:
        c.test(models)
        c.poisoningCompare(models, 4, 9)

### Try to link gradients together: invert a stored gradient back into an image
invertedData = inversion.invert(models[0].gradientHistory[488],
                                numClasses, numFeatures)
imgData = np.reshape(invertedData, (28, 28))
plt.imshow(imgData, cmap='gray')
plt.show()