def main(opts):
    dir_nm = opts.dir_nm

    X_avg_sol = read_input_full(opts.dir_nm, "plate_0", is_normalize=False)
    if (opts.fl_nm == "plate_1"):
        X_avg_sol = X_avg_sol[0][0]
    elif (opts.fl_nm == "plate_2"):
        X_avg_sol = X_avg_sol[1][0]
    X_rom_modes = read_input_full(opts.dir_nm, opts.fl_nm, is_normalize=False)

    # model predicted data
    pred_freq_test = read_pred("freq_pred_test_%s_%s" % (dir_nm, opts.fl_nm))
    pred_amp_test = read_pred("ampt_pred_test_%s_%s" % (dir_nm, opts.fl_nm))
    pred_phase_test = read_pred("phase_pred_test_%s_%s" % (dir_nm, opts.fl_nm))

    # gt data
    gt_amp_freq_test = read_gt(dir_nm, opts.fl_nm)
    gt_amp_test = gt_amp_freq_test[:, 0]
    gt_freq_test = gt_amp_freq_test[:, 1]
    gt_phase_test = gt_amp_freq_test[:, 2]

    writable(writable("plots", "ReconstructedSolution"), "temp")
    for t in range(1000):
        # copy so the running sum does not modify the mean solution in place
        X_sol = X_avg_sol.copy()
        for i in range(10):
            temporal_val = get_temporal_val(pred_freq_test[i], pred_amp_test[i],
                                            pred_phase_test[i], t)
            X_sol += X_rom_modes[i][0] * temporal_val
        make_plots(X_sol,
                   writable("ReconstructedSolution",
                            ("sol_%s_%s_%d" % (opts.dir_nm, opts.fl_nm, t)).replace(".", "_")))
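# get_temporal_val is not defined in this file. Based on the evaluation script
# further down (which reconstructs a mode as amp * sin(2*pi*freq*t + phase)),
# a minimal sketch of what it is assumed to compute is given below; the exact
# implementation in the project may differ.
import numpy as np

def get_temporal_val(freq, amp, phase, t):
    """Assumed temporal coefficient of one ROM mode at timestep t,
    reconstructed from its fitted frequency, amplitude, and phase."""
    return amp * np.sin(2 * np.pi * freq * t + phase)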
def main(opts):
    dataset = opts.dataset
    embed_dim = int(opts.dimension)
    fold = int(opts.fold)

    # File that contains the edges. Format: source target
    # Optionally, you can add weights as third column: source target weight
    edge_f = 'Data/link_prediction/%s_80_20/%s_%d.edgelist' % (dataset,
                                                               dataset, fold)
    # Specify whether the edges are directed
    # isDirected = True

    print("Loading Dataset")
    # Load graph
    G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)
    # G = G.to_directed()

    embedding = LaplacianEigenmaps(d=embed_dim)
    print('Num nodes: %d, num edges: %d' % (G.number_of_nodes(),
                                            G.number_of_edges()))

    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    print("Starting Embedding")
    Y, t = embedding.learn_embedding(graph=G, edge_f=None,
                                     is_weighted=True, no_python=True)
    print(embedding._method_name + ':\n\tTraining time: %f' % (time() - t1))

    np_save(writable(writable(writable("Embedding_Results", "link_prediction"),
                              dataset), str(embed_dim) + "fold_" + str(fold)) + "_u", Y)
def main(opts):
    dataset = opts.dataset
    embed_dim = int(opts.dimension)

    # File that contains the edges. Format: source target
    # Optionally, you can add weights as third column: source target weight
    edge_f = 'Data/%s.edgelist' % dataset

    G = nx.read_edgelist(edge_f)
    A = nx.adjacency_matrix(G)
    num_points = A.shape[0]
    D = np.sum(A, axis=0)
    D = np.squeeze(np.asarray(D))
    A = csr_matrix(A)

    # start the timer before the optimization so the reported time covers it
    t1 = time()
    res = optimize.minimize(partial(num_lap, A=A, n=num_points, dim=embed_dim,
                                    D=D, c=1),
                            np.random.rand(num_points * embed_dim),
                            jac=partial(lap_jac, A=A, n=num_points,
                                        dim=embed_dim, c=1))
    print(res.fun)
    Y = res.x.reshape(num_points, embed_dim)
    print('Numerical Laplacian:\n\tTraining time: %f' % (time() - t1))

    np_save(writable("Embedding_Results", dataset + str(embed_dim) + opts.cost), Y)
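# num_lap and lap_jac are defined elsewhere in the project. For reference, a
# minimal sketch of the kind of objective being minimized here (the standard
# Laplacian-eigenmaps cost tr(Y^T L Y) plus a soft orthogonality penalty
# weighted by c) is given below; the project's actual cost and its gradient
# may differ in the penalty term and normalization.
import numpy as np
from scipy.sparse import diags

def num_lap_sketch(y_flat, A, n, dim, D, c=1):
    """Assumed numerical Laplacian-eigenmaps cost for a flattened embedding."""
    Y = y_flat.reshape(n, dim)
    L = diags(D) - A                       # unnormalized graph Laplacian
    smoothness = np.trace(Y.T @ (L @ Y))   # = 0.5 * sum_ij w_ij ||y_i - y_j||^2
    ortho_penalty = np.linalg.norm(Y.T @ (diags(D) @ Y) - np.eye(dim)) ** 2
    return smoothness + c * ortho_penalty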
def main(opts):
    dir_nm = opts.dir_nm
    fl_nm = opts.fl_nm

    # freq amp from dnn
    pred_freq = read_pred("freq_pred_test")
    pred_amp = read_pred("amp_pred_test")

    # freq amp from fft
    gt_amp_freq = read_gt(dir_nm, fl_nm)
    gt_amp = gt_amp_freq[:, 0]
    gt_freq = gt_amp_freq[:, 1]
    gt_phase = gt_amp_freq[:, 2]

    x = np.array(list(range(1000)))

    # actual data from ROM
    np_data = np.loadtxt('../Madhav/%s/%s_at.txt' % (dir_nm, fl_nm))
    data_mode = np_data[:, 2:]

    lst_mse_fft, lst_mse_dnn = [], []
    with open(writable("evaluate_results", "Results"), "w") as fl_out:
        for i in range(gt_freq.shape[0]):
            data_gt = gt_amp[i] * np.sin(2 * np.pi * gt_freq[i] * x + gt_phase[i])
            # only the frequency is swapped for the DNN curve; amplitude and
            # phase are kept from the FFT ground truth
            data_pred = gt_amp[i] * np.sin(2 * np.pi * pred_freq[i] * x + gt_phase[i])
            data_mode_i = data_mode[:, i]

            plt.plot(x, data_gt, label="FFT")
            plt.plot(x, data_pred, label="DNN")
            plt.plot(x, data_mode_i, label="mode")
            plt.legend()
            plt.savefig(writable("evaluate_results", "%d" % i))
            plt.close()

            mse_fft = ((data_gt - data_mode_i)**2).mean()
            mse_dnn = ((data_pred - data_mode_i)**2).mean()
            fl_out.write("%.3f, %.3f\n" % (mse_fft, mse_dnn))
            lst_mse_fft.append(mse_fft)
            lst_mse_dnn.append(mse_dnn)

    print("fft error %.4f" % (sum(lst_mse_fft) / float(len(lst_mse_fft))))
    print("dnn error %.4f" % (sum(lst_mse_dnn) / float(len(lst_mse_dnn))))
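# read_pred and read_gt are helpers defined elsewhere. Judging from how they
# are used (one predicted value per line under a Results folder, and
# comma-separated "amp, freq, phase" rows written by the sine-fitting script
# below), minimal sketches could look like this; the directory names here are
# assumptions, not the project's actual paths.
import numpy as np

def read_pred_sketch(fl_nm, results_dir="Results"):
    """Assumed: load one predicted value per line from the results folder."""
    return np.loadtxt("%s/%s" % (results_dir, fl_nm))

def read_gt_sketch(dir_nm, fl_nm, results_dir="my_Results"):
    """Assumed: load the fitted (amp, freq, phase) rows written per mode."""
    return np.loadtxt("%s/%s/%s_at.txt" % (results_dir, dir_nm, fl_nm),
                      delimiter=',')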
def main(opts):
    dir_nm = opts.dir_nm
    fl_nm = opts.fl_nm

    np_data = np.loadtxt('../Madhav/%s/%s.txt' % (dir_nm, fl_nm))
    lst_mse = []
    with open(writable(writable("my_Results", dir_nm), "%s.txt" % fl_nm), "w") as fl_out:
        for i in range(2, 12):
            x, y = np_data[:, 0], np_data[:, i]
            res = fit_sin(x, y)
            # keep a positive amplitude and shift the phase accordingly
            if (res["amp"] < 0):
                res["amp"] = -res["amp"]
                res["phase"] = res["phase"] + np.pi
            # TODO: assumes initial phase is between -pi to pi
            if (res["phase"] > np.pi):
                res["phase"] = res["phase"] - 2 * np.pi
            elif (res["phase"] < -np.pi):
                res["phase"] = res["phase"] + 2 * np.pi
            print("Amplitude=%(amp)s, Angular freq.=%(omega)s, phase=%(phase)s, "
                  "offset=%(offset)s, Max. Cov.=%(maxcov)s" % res)
            lst_mse.append(res["mse"])

            fig, ax = plt.subplots()
            plt.scatter(x, y, label="Temporal Mode points", color='green', linewidth=0.3)
            plt.plot(x, res["fitfunc"](x), label="Sine fit", color='blue')
            plt.gcf().set_size_inches(16, 8)
            plt.xlabel("Timestamp", fontsize=30)
            plt.ylabel("Magnitude", fontsize=30)
            ax.tick_params(axis='both', which='major', labelsize=25)
            plt.legend(loc='upper right', fontsize=30)
            plt.savefig(writable(writable(writable("my_Results", dir_nm), fl_nm),
                                 "%d" % (i - 2)),
                        bbox_inches='tight')
            plt.close()

            fl_out.write("%f, %f, %f\n" % (abs(res["amp"]), res["freq"], res["phase"]))

    print("Mean mse: %f" % (sum(lst_mse) / len(lst_mse)))
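# fit_sin comes from the project's fitting utilities. A minimal sketch of a
# compatible single-sine fit (returning the keys the loop above reads: amp,
# omega, freq, phase, offset, maxcov, mse, fitfunc) can be built on
# scipy.optimize.curve_fit; the real helper's initial guesses and convergence
# handling may differ.
import numpy as np
from scipy import optimize

def fit_sin_sketch(x, y):
    """Assumed least-squares fit of y ~ amp * sin(omega * x + phase) + offset."""
    # crude initial guesses: amplitude from the std, frequency from the
    # dominant FFT bin (ignoring the DC term)
    guess_amp = np.std(y) * 2 ** 0.5
    freqs = np.fft.fftfreq(len(x), d=(x[1] - x[0]))
    guess_freq = abs(freqs[np.argmax(abs(np.fft.fft(y))[1:]) + 1])
    guess = [guess_amp, 2 * np.pi * guess_freq, 0.0, np.mean(y)]

    def sinfunc(t, amp, omega, phase, offset):
        return amp * np.sin(omega * t + phase) + offset

    popt, pcov = optimize.curve_fit(sinfunc, x, y, p0=guess)
    amp, omega, phase, offset = popt
    fitfunc = lambda t: sinfunc(t, *popt)
    return {"amp": amp, "omega": omega, "freq": omega / (2 * np.pi),
            "phase": phase, "offset": offset, "maxcov": np.max(pcov),
            "mse": ((y - fitfunc(x)) ** 2).mean(), "fitfunc": fitfunc}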
def make_plots(x, fl_nm):
    import pandas as pd
    import seaborn as sns

    plt.rcParams['figure.figsize'] = (20.0, 10.0)
    plt.rcParams['font.family'] = "serif"

    df = pd.DataFrame(x)
    ax = sns.heatmap(df, cmap='Greys')
    sns.set(font_scale=4)
    # ax.set_yticklabels([])
    # ax.set_xticklabels([])
    # ax.invert_yaxis()
    plt.savefig(writable("plots", fl_nm + ".png"), bbox_inches='tight')
    plt.close()
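# writable is used throughout these scripts both to name output directories
# and to build file paths inside them. A minimal sketch of the assumed
# behaviour (join the two parts, creating the parent directory if needed) is
# given below; the project's helper may handle existing paths differently.
import os

def writable_sketch(parent, child):
    """Assumed: return parent/child, creating the parent directory if needed."""
    if not os.path.isdir(parent):
        os.makedirs(parent)
    return os.path.join(parent, child)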
def main(opts):
    dir_nm = opts.dir_nm
    fl_nm = opts.fl_nm

    np_data = np.loadtxt('../Madhav/%s/%s.txt' % (dir_nm, fl_nm))
    lst_mse = []
    with open(writable(writable("my_Results", dir_nm), "%s_ord2.txt" % fl_nm), "w") as fl_out:
        for i in range(2, 12):
            x, y = np_data[:, 0], np_data[:, i]
            # res = fit_sin(x, y)
            # print("Amplitude=%(amp)s, Angular freq.=%(omega)s, phase=%(phase)s, "
            #       "offset=%(offset)s, Max. Cov.=%(maxcov)s" % res)
            res = fit_fft2(x, y)
            amp_phase = cnvt_to_std_sine_form(res["amp"][0], res["phase"][0])
            res["amp"][0], res["phase"][0] = amp_phase[0], amp_phase[1]
            amp_phase = cnvt_to_std_sine_form(res["amp"][1], res["phase"][1])
            res["amp"][1], res["phase"][1] = amp_phase[0], amp_phase[1]
            lst_mse.append(res["mse"])

            fig, ax = plt.subplots()
            plt.scatter(x, y, label="Temporal Mode points", color='green', linewidth=0.3)
            plt.plot(x, res["fitfunc"](x), label="Sine fit", color='blue')
            plt.gcf().set_size_inches(16, 8)
            plt.xlabel("Timestamp", fontsize=30)
            plt.ylabel("Magnitude", fontsize=30)
            ax.tick_params(axis="both", which="major", labelsize=25)
            plt.legend(loc='upper right', fontsize=30)
            plt.savefig(writable(writable(writable("my_Results", dir_nm), fl_nm),
                                 "%d_ord2" % (i - 2)),
                        bbox_inches='tight')
            plt.close()

            # fl_out.write("%f, %f\n" % (abs(res["amp"]), res["freq"]))
            fl_out.write("%f, %f, %f, %f, %f, %f\n" % (res["amp"][0], res["amp"][1],
                                                       res["freq"][0], res["freq"][1],
                                                       res["phase"][0], res["phase"][1]))

    print("Mean mse: %f" % (sum(lst_mse) / len(lst_mse)))
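# cnvt_to_std_sine_form appears to mirror the normalization done inline in the
# single-sine script above: force a positive amplitude and wrap the phase back
# into [-pi, pi). A minimal sketch under that assumption:
import numpy as np

def cnvt_to_std_sine_form_sketch(amp, phase):
    """Assumed: return (|amp|, phase) with the phase wrapped to [-pi, pi)."""
    if amp < 0:
        amp, phase = -amp, phase + np.pi
    # wrap the phase into [-pi, pi)
    phase = (phase + np.pi) % (2 * np.pi) - np.pi
    return amp, phase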
def main(opts):
    # col = 1 for frequency
    # col = 0 for amplitude
    # col = 2 for phase
    global glo_min
    global glo_max
    if (opts.char_val == "ampt"):
        col = 0
        glo_min, glo_max = 0.001, 100
    elif (opts.char_val == "freq"):
        col = 1
        glo_min, glo_max = 0.01, 1e-1
    elif (opts.char_val == "phase"):
        col = 2
        glo_min, glo_max = -np.pi, np.pi

    # number of modes to use for training and testing
    train_num_mode = 10
    test_num_mode = 10
    tot_epochs = opts.epoch

    lst_dir_nm = opts.train_dir_nm.split(",")
    test_dir_nm = opts.test_dir_nm

    if len(lst_dir_nm) == 2:
        # first
        X_1 = read_input(lst_dir_nm[0], train_num_mode, opts.fl_nm)
        Y_1 = np.loadtxt("../freq_ampt/my_Results/%s/%s_at.txt" % (lst_dir_nm[0],
                                                                   opts.fl_nm),
                         delimiter=',')
        Y_1 = Y_1[:train_num_mode, col]

        # second
        X_2 = read_input(lst_dir_nm[1], train_num_mode, opts.fl_nm)
        Y_2 = np.loadtxt("../freq_ampt/my_Results/%s/%s_at.txt" % (lst_dir_nm[1],
                                                                   opts.fl_nm),
                         delimiter=',')
        Y_2 = Y_2[:train_num_mode, col]

        # X_3 = read_input(lst_dir_nm[2], train_num_mode, opts.fl_nm)
        # Y_3 = np.loadtxt("../freq_ampt/Results/%s/%s_at.txt" % (lst_dir_nm[2],
        #                                                         opts.fl_nm), delimiter=',')

        X = np.concatenate((X_1, X_2), axis=0)
        Y = np.concatenate((Y_1, Y_2), axis=0)
    # this is for re70k_fsi
    elif len(lst_dir_nm) == 1:
        X = read_input_rescale(lst_dir_nm[0], train_num_mode, opts.fl_nm)
        Y = np.loadtxt("../freq_ampt/my_Results/%s/%s_at.txt" % (lst_dir_nm[0],
                                                                 opts.fl_nm),
                       delimiter=',')
        Y = Y[:train_num_mode, col]

    # X, Y = augment_data(X, Y)
    val_max, val_min = Y.max(), Y.min()
    # X = X_1
    # Y = Y_1
    # Y = norm(Y)
    Y = norm(Y, "unit_ab")
    # Y = norm(Y, "sigmoid")

    if (test_dir_nm != "re70k_fsi"):
        X_test = read_input(test_dir_nm, test_num_mode, opts.fl_nm)
        Y_test = np.loadtxt("../freq_ampt/my_Results/%s/%s_at.txt" % (test_dir_nm,
                                                                      opts.fl_nm),
                            delimiter=',')
    else:
        X_test = read_input_rescale(test_dir_nm, test_num_mode, opts.fl_nm)
        Y_test = np.loadtxt("../freq_ampt/my_Results/%s/%s_at.txt" % (test_dir_nm,
                                                                      opts.fl_nm),
                            delimiter=',')
    Y_test = Y_test[:test_num_mode, col]
    # Y_test = norm(Y_test, "")
    Y_test = norm(Y_test, "unit_ab")
    # Y_test = norm(Y_test, "sigmoid")

    # plots
    for i in range(10):
        make_plots(X[i][0], "%s_%s_mode_%d" % (lst_dir_nm[0], opts.fl_nm, i + 1))
    for i in range(10):
        make_plots(X[10 + i][0], "%s_%s_mode_%d" % (lst_dir_nm[1], opts.fl_nm, i + 1))
    for i in range(10):
        make_plots(X_test[i][0], "%s_%s_mode_%d" % (test_dir_nm, opts.fl_nm, i + 1))

    Y_class = numerical_to_class(Y)
    Y_test_class = numerical_to_class(Y_test)

    x, y = Variable(torch.from_numpy(X)).float(), Variable(torch.from_numpy(Y)).float()
    x_test, y_test = Variable(torch.from_numpy(X_test)).float(), \
        Variable(torch.from_numpy(Y_test)).float()

    # only when you normalize y's to unit normal
    # net = Net_class()
    # class_prediction = train_with_CrossEntropLoss(x, y, x_test, y_test, net, tot_epochs)
    # class_prediction = class_prediction.data.numpy()
    # with open("class_prediction_Result", "w") as fl_out:
    #     for i in range(class_prediction.shape[0]):
    #         fl_out.write("%f, %f, %f\n" % (class_prediction[i][0],
    #                                        class_prediction[i][1], Y[i]))

    net = Net_class()
    pred_test, pred_train = train_with_BinaryEntropLoss(x, y, x_test, y_test,
                                                        net, tot_epochs)

    # inverse of unit
    # pred_test = pred_test * (val_max - val_min) + val_min
    # pred_train = pred_train * (val_max - val_min) + val_min
    # inverse of sigmoid
    # pred_test = np.log(pred_test / (1 - pred_test))
    # pred_train = np.log(pred_train / (1 - pred_train))
    # inverse of unit_ab
    pred_test = pred_test * (glo_max - glo_min) + glo_min
    pred_train = pred_train * (glo_max - glo_min) + glo_min

    # freq_amp = "freq" if col else "amp"
    with open(writable("Results",
                       "%s_pred_test_%s_%s" % (opts.char_val, test_dir_nm, opts.fl_nm)),
              "w") as fl_out:
        for i in range(pred_test.shape[0]):
            fl_out.write("%f\n" % (pred_test[i]))
    with open(writable("Results",
                       "%s_pred_train_%s_%s" % (opts.char_val, '_'.join(lst_dir_nm),
                                                opts.fl_nm)),
              "w") as fl_out:
        for i in range(pred_train.shape[0]):
            fl_out.write("%f\n" % (pred_train[i]))