def eigvalAccuracySingleElectron(n = list(range(10, 350,10)), rhomax=4.827, eps = 12, method=1, fitIdxStart = 10, sim = False, datafile="data.dat"):
    """
    Plot the number of correct digits in the four lowest single-electron
    eigenvalues against matrix size on the module-level axis ``ax``.

    n           -- matrix sizes to evaluate (and simulate when sim=True)
    rhomax      -- cutoff radius passed to ./main.out
    eps         -- tolerance exponent passed to ./main.out
    method      -- NOTE(review): accepted but never used in this body
    fitIdxStart -- first index of n/vals used for the log10 curve fit
    sim         -- when True, clear data/<datafile> and rerun the solver
    datafile    -- file name read back via read_data
    """
    if sim:
        open(f"data/{datafile}", "w+").close()
        for n_ in n:
            subprocess.run(f"./main.out {n_} {n_} {eps} {rhomax} 1 1 {datafile}".split())
            # progress as percentage of the n sweep
            print(f"N: {n_}, {round(100*(n_-n[0])/(n[-1]-n[0]), 2)} %")
    data = read_data(datafile)
    avals = np.array([3,7,11,15]) # analytic eigenvals
    vals = np.zeros(len(n)) # array to be filled
    for i,n_ in enumerate(n):
        # mean abs. error of the 4 lowest eigenvalues, expressed as correct digits
        vals[i] = -np.log10(1/4*np.sum(np.abs(np.sort(data[str(n_)].eigvals)[:4]-avals)))
    ax.plot(n, vals, color="red", lw=3, alpha=1, label="Numerical values")
    # using scipys optimize.curve_fit we find the best coefficient for the model a*log10(n)+b
    f = lambda t,a,b: a*np.log10(t)+b
    popt, pcov = optimize.curve_fit(f, n[fitIdxStart:], vals[fitIdxStart:])
    x = np.linspace(n[fitIdxStart], 4000, 1000)
    #ax.plot(x,f(x,*popt), color="blue", lw=2, alpha=0.5, label=f"Fit ${round(popt[0],2)}\log"+r"_{10}"+f"(n){round(popt[1],3)}$")
    ax.set_xlabel("Matrix size, n")
    ax.set_ylabel("No. of correct digits")
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=0.4, frameon = True,loc='lower right')
    legend.get_frame().set_facecolor('white')
    ax.set_title(f"Number of correct digits in eigenvalues")
def predict_from_model(model_dir, pred_file):
    """
    Run inference with a previously trained model.

    Steps: strip any leading models-folder prefix, load the saved flags,
    rebuild the data loaders, construct the network in inference mode, and
    write predictions for ``pred_file``.

    :param model_dir: folder the model was saved under (a leading
        "models/" prefix is stripped so only the run name remains)
    :param pred_file: file handed to Network.predict
    :return: None
    """
    # NOTE(review): startswith("models") also matches names like "modelsX";
    # presumably callers always pass "models/<name>" -- confirm.
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print("Retrieving flag object for parameters")
    flags = flagreader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    print("Making network now")
    # test_ratio near 1 pushes essentially every sample into the test loader
    train_loader, test_loader = datareader.read_data(
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        batch_size=flags.batch_size,
        normalize_input=flags.normalize_input,
        data_dir=flags.data_dir,
        test_ratio=0.999,
    )
    print("Making network now")
    # Make Network
    ntwk = Network(Forward, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    # Evaluation process
    print("Start pred now:")
    ntwk.predict(pred_file)
    print("Prediction finished")
def wavefunctionTwoElectron(n=100, method=[1,2], eps=12, rho_max=4.827, omega=[0.01, 0.5, 1, 5], sim=False, datafile="data.dat"): beta_esq = 1.44 # eVnm chbar = 1240 # eVnm me = 0.511 # eV alpha = chbar ** 2 / me / beta_esq if "__iter__" not in dir(omega): omega = [omega] if sim: open(f"data/{datafile}", "w").close() # clear file for method_ in method: for w in omega: subprocess.run(f"./main.out {n}{eps}{rho_max}{w}{method_} {n} {eps} {rho_max} {method_} {w} {datafile}".split()) sols = read_data(datafile) for method_ in method: for w in omega: data = sols[f"{n}{eps}{rho_max}{w}{method_}"] vals = data.eigvals vecs = data.eigvecs vecs /= np.linalg.norm(vecs, axis=0) vecs, vals = mat_sort_by_array(vecs, vals) rho = np.linspace(0, data.pmax, data.n) for i in range(1): print(np.linalg.norm(vecs[:, i])) ax.plot(rho, vecs[:, i] ** 2, lw=2,) # label=f"$\omega = {omega[i]}$, norm: {round(np.linalg.norm(vecs[:, i]),3)}" ax.legend([f"$\omega = {omega_}$" for omega_ in omega])
def eigvalsSingleElectron(n = 100, rhomax = np.linspace(4,6, 100), eps = 16, omega = 1, method=1, eigcount = 10, sim = False, datafile = "data.dat"):
    """
    Plot the lowest ``eigcount`` single-electron eigenvalues against their
    index for a sweep over either n or rhomax (whichever is array-like),
    coloured with a red->cyan gradient plus a colourbar, on the global ``ax``.
    """
    # decide which parameter varies: a scalar n means rhomax is the sweep
    if not isinstance(n, list) and not isinstance(n, np.ndarray):
        var = "rhomax" #rhomax varies
        vars = rhomax
        static = f"$n = {n}$"
    else:
        var = "n" #else n varies
        vars = n
        static = r"$\rho_{max}=$" + f"${rhomax}$"
    if sim: # run simulation if prompted
        print("Solving ...")
        open(f"data/{datafile}", "w").close() # clear file
        start = time.time()
        if var == "n":
            for n in vars:
                # first token doubles as the dataset key read back below
                subprocess.run(f"./main.out {n}{rhomax} {n} {eps} {rhomax} {method} {omega} {datafile}".split())
                print(f"N: {n}/{vars[-1]}, {round(100*(n-vars[0])/(vars[-1]-vars[0]), 2)} %")
            print(f"Done in {round(time.time()- start,3)} s")
        elif var == "rhomax":
            for rhomax in vars:
                subprocess.run(f"./main.out {n}{rhomax} {n} {eps} {rhomax} 1 1 {datafile}".split())
                print(f"ρ: {round(rhomax,3)}/{vars[-1]}, {round(100*(rhomax-vars[0])/(vars[-1]-vars[0]), 2)} %")
            print(f"Done in {round(time.time()- start,3)} s")
    sols = read_data(datafile)
    colors = list(Color("red").range_to(Color("cyan"),len(vars))) #TO get a gradient effect
    colors = [str(color) for color in colors]
    analytical = np.array([4*a+3 for a in range(0,eigcount)]) # the analytical eigenvalues
    indexes = np.arange(1,eigcount+1)
    if var == "n":
        for i,n in enumerate(vars):
            ax.plot(indexes, np.sort(sols[f"{n}{rhomax}"].eigvals)[:eigcount],'-o', markersize=1, color=colors[i], lw=1.5, alpha=0.6)
        label = f"Matrix size, $n$"
    elif var == "rhomax":
        for i,rhomax in enumerate(vars):
            ax.plot(indexes, np.sort(sols[f"{n}{rhomax}"].eigvals)[:eigcount],'-o', markersize=1, color=colors[i], lw=1.5, alpha=0.6)
        label = r"$\rho_{max}$"
    ax.plot(indexes, analytical, label="Analytical", color="black", lw=2, alpha=0.8, linestyle="--")
    ax.set_xlabel("Index, $i$")
    ax.set_ylabel(r"$\lambda_{i}$")
    ax.set_ylim(top=analytical[-1]*1.2, bottom=analytical[0]*0.8)
    ax.set_xlim(1, eigcount)
    # horizontal colourbar indexes the swept variable
    cmap = mpl.colors.ListedColormap(colors)
    norm = mpl.colors.Normalize(vmin=vars[0],vmax=vars[-1])
    ax.figure.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax, orientation='horizontal', label=label, ticks=[int(a) for a in np.linspace(vars[0], vars[-1],5)] )
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=0.4, frameon = True,loc='lower right')
    legend.get_frame().set_facecolor('white')
    ax.set_title(f"Eigenvalues for {static}")
def setup(file):
    """Read *file* and build the matching JSim matrix from its truth file."""
    dataset = dr.read_data(file)
    size = len(dataset.keys())
    # the ground truth lives next to the data: foo.dat -> JSims/foo_truth.txt
    truth_path = 'JSims/' + file.replace('.dat', '_truth.txt')
    # comment in when using an unknown data set
    # preprocessing.set_initial_JSim(dataset, size, truth_path)
    similarity = preprocessing.set_JSim(size, truth_path)
    return dataset, similarity
def plotEigvalAccuracyDynamicN(ax, Ns, eps=12, pmax=20, datafile="data.dat", sim=False):
    """
    Plot the number of correct digits in the four lowest eigenvalues as a
    function of matrix size ``Ns``, plus a fitted a*log10(n)+b model, on ``ax``.

    NOTE(review): the fit uses Ns[100:] -- assumes len(Ns) > 100; confirm
    with callers.
    """
    if sim:
        open("data.dat", "w+").close()
        for n in Ns:
            subprocess.run(f"./main.out {n} {n} {eps} {pmax} 1 1".split())
            print(f"N: {n}, {round(100*(n-Ns[0])/(Ns[-1]-Ns[0]), 2)} %")
        # keep a copy of the raw run for later inspection
        subprocess.run("cp data.dat dataFindAcc.dat".split())
    data = read_data(datafile)
    avals = np.array([3, 7, 11, 15])  # analytic eigenvals
    vals = np.zeros(len(Ns))  # array to be filled
    for i, n in enumerate(Ns):
        # mean abs. error of the 4 lowest eigenvalues -> number of correct digits
        vals[i] = -np.log10(
            1 / 4 * np.sum(np.abs(np.sort(data[str(n)].eigvals)[:4] - avals)))
    ax.plot(Ns, vals, color="red", lw=3, alpha=1, label="Numerical values")
    # using scipys optimize.curve_fit we find the best coefficient for the model a*log10(n)+b
    f = lambda t, a, b: a * np.log10(t) + b
    popt, pcov = optimize.curve_fit(f, Ns[100:], vals[100:])
    x = np.linspace(Ns[100], 4000, 1000)
    ax.plot(x, f(x, *popt), color="blue", lw=2, alpha=0.5,
            label=f"Fit ${round(popt[0],2)}\log" + r"_{10}" + f"(n){round(popt[1],3)}$")
    ax.set_xlabel("Matrix size, n")
    ax.set_ylabel("No. of correct digits")
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=0.4, frameon=True, loc='lower right')
    legend.get_frame().set_facecolor('white')
    ax.set_title(f"Number of correct digits in eigenvalues")
def timer(n=range(1, 10), rhomax=[1, 10, 20], eps=12, omega=[0, 1, [0.01, 5]], method=[0, 1, 2], sim=False, datafile="data.dat"):
    # plots elapsed time of solving against N on axis ax, as well as comparing to n**2 and n**2*ln(n)
    """
    Plot solver wall time against matrix size for each method (and each
    omega for the double-electron case) on the module-level axis ``ax``.
    """
    if sim:
        print("Solving ...")
        open(f"data/{datafile}", "w+").close()  # clear file
        for method_ in method:
            start = time.time()
            # omega[method_] may be a scalar or a list of omegas
            if not isinstance(omega[method_], (list, np.ndarray)):
                w = [omega[method_]]
            else:
                w = omega[method_]
            for wi in w:
                for N in n:
                    # first token doubles as the dataset key read back below
                    subprocess.run(f"./main.out {N}{eps}{rhomax[method_]}{method_}{wi} {N} {eps} {rhomax[method_]} {method_} {wi} {datafile}".split())
                    print(f"{round(100*(N-n[0])/(n[-1]-n[0]), 2)} %, N: {N}, eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {wi}")
            print(f"Finished in {round(time.time() -start,3)} s.")
    #if datafile != "data.dat": # move content to designated file
    #subprocess.run(f"cp data.dat {datafile}".split())
    sols = read_data(datafile) #all solutions
    for method_ in method: # all methods are plotted
        if "__iter__" not in dir(omega[method_]):
            w = [omega[method_]]
        else:
            w = omega[method_]
        for i, wi in enumerate(w): # all omegas are plotted
            tim = np.array([sols[f"{N}{eps}{rhomax[method_]}{method_}{wi}"].time for N in n]) # array of counted transformations
            color = {0:["red"], 1:["blue"], 2:["lime", "mediumseagreen", "seagreen", "darkgreen"]}[method_][i] #every method gets unique color
            label = {0:"Buckling beam", 1:r"Quantum 1, $\rho_{max} = $"+str(rhomax[method_]), 2:r"Quantum 2, $\rho_{max} = $"+f"{rhomax[method_]}, $\omega_r = {wi}$"}[method_]
            ax.plot(n, tim, "-o", markersize=12, color=color, alpha=0.8, lw=5, label=label)
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
    frame = legend.get_frame()
    frame.set_facecolor('white')
    ax.set_xlabel("Matrix size, $n$")
    ax.set_ylabel("Time, $s$")
    plt.title(r"Time of solving")#, varying $\omega_r$")
    # NOTE(review): uses the loop variables after the loops finished, so this
    # reports only the last (method_, wi) combination -- presumably debug output.
    print(f" eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {wi}:")
def transforms(n=np.arange(20, 350, 3), rhomax=[1, 10, 20], eps=12, omega=[0, 1, 5], method=[0, 1, 2], sim=False, datafile="POLYdata.dat"):
    # plots num of transformations against N on axis ax, as well as comparing to n**2 and n**2*ln(n)
    """
    Log-log plot of Jacobi rotation counts vs. matrix size per method, with
    a linear fit (slope = scaling exponent) printed for each method.
    """
    if sim:
        print("Solving ...")
        open(f"data/{datafile}", "w+").close()  # clear file
        for method_ in method:
            start = time.time()
            for N in n:
                # first token doubles as the dataset key read back below
                subprocess.run(
                    f"./main.out {N}{eps}{rhomax[method_]}{method_}{omega[method_]} {N} {eps} {rhomax[method_]} {method_} {omega[method_]} {datafile}"
                    .split())
                print(
                    f"{round(100*(N-n[0])/(n[-1]-n[0]), 2)} %, N: {N}, eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {omega[method_]}"
                )
            print(f"Done in {round(time.time() -start,3)} s.")
    #if datafile != "data.dat": # move content to designated file
    #subprocess.run(f"cp data.dat {datafile}".split())
    sols = read_data(datafile) #all solutions
    for method_ in [0, 1, 2]: # all methods are plotted
        trans = np.array([
            sols[f"{N}{eps}{rhomax[method_]}{method_}{omega[method_]}"].
            transformations for N in n
        ]) # array of counted transformations
        print(
            f" eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {omega[method_]}:"
        )
        # slope of log(transformations) vs log(n) gives the scaling exponent
        coeff, residual, rank, sv, cond = np.polyfit(np.log10(n),
                                                     np.log10(trans),
                                                     deg=1,
                                                     full=True)
        print(residual, rank, sv, cond)
        print(f"a= {coeff[0]} +- {residual[0]}")
        plt.plot(np.log10(n), np.log10(trans), label=str(method_))
    plt.show()
def optimalRhomaxSingleElectron(n= 100,rhomax = np.linspace(3.2,5.7, 1000), eps = 12, omega=1, method=1, sim = False, datafile = "data.dat" ):
    # given a dataset with rhomax (rhomax) finds which rhomax makes each of the first 4 eigvals of quantum 1 the most accurate
    # produces plot as well
    """
    Sweep rhomax and, for each of the four lowest eigenvalues, report and
    mark the rhomax that minimises its relative error, on the global ``ax``.
    """
    if sim:
        print("Solving ...")
        open(f"data/{datafile}", "w").close() # clear file
        start = time.time()
        for _rhomax in rhomax:
            # first token doubles as the dataset key read back below
            subprocess.run(f"./main.out {n}{_rhomax} {n} {eps} {_rhomax} {method} {omega} {datafile}".split())
            print(f"ρ: {round(_rhomax,3)}/{rhomax[-1]}, {round(100*(_rhomax-rhomax[0])/(rhomax[-1]-rhomax[0]), 2)} %")
        print(f"Done in {round(time.time()- start,3)} s")
    sols = read_data(datafile) # read solutions
    colors = list(Color("red").range_to(Color("blue"),4)) # prettyyyy colorrssss
    colors = [str(color) for color in colors]
    analytical = np.array([4*a+3 for a in range(0,4)]) # analytical eigenvalues
    erel = np.zeros((len(rhomax),4)) #error_rel, to be filled
    for i,_rhomax in enumerate(rhomax):
        erel[i] = np.abs(np.sort(sols[f"{n}{_rhomax}"].eigvals)[:4]-analytical)/analytical # fill with all relative errors
    minidx = erel.argmin(axis=0) # find where the graph has its lowest point, a.k.a where the error is the lowest
    # plotting code
    for i in range(4):
        ax.plot(rhomax, erel[:,i], color = colors[i])
    for i,idx in enumerate(minidx):
        print(f"eig_{i+1} Best rhomax: {round(rhomax[idx],5)}, err_rel: {round(erel[idx][i],6)}") # print the results
        ax.plot(rhomax[idx], erel[idx][i], "-o", label = f"$\lambda_{i+1}: $"+r"$\rho_{max}="+f"{round(rhomax[idx],3)}$,"+r"$\epsilon_{rel}="+"%.2e$" %erel[idx][i],color = colors[i])
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=0.4, frameon = True)
    legend.get_frame().set_facecolor('white')
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    ax.set_title(r"$\epsilon_{rel}$ for four first eigenvalues")
    ax.set_xlabel(r"$\rho_{max}$")
    ax.set_ylabel(r"$\epsilon_{rel}$")
def timeArmadilloBeam(n=np.arange(10, 151, 10), rho_max=1, eps=12, omega=0, method=0, datafile="data.dat"):
    """
    Benchmark the Jacobi solver against Armadillo on the buckling-beam
    problem and plot log-log timings with linear fits on the global ``ax``.

    n        -- matrix sizes to benchmark
    rho_max, eps, omega, method -- parameters forwarded to ./main.out
    datafile -- scratch file under data/ shared by both binaries
    """
    arma = []
    jacobi = []
    print("Solving ...")
    start = time.time()
    for _n in n:
        # Armadillo timing: run on a freshly cleared data file, then read it back
        open(f"data/{datafile}", "w").close()  # clear file
        subprocess.run(f"./arma.out {_n} {datafile}".split())
        _, _, t = read_arma(datafile)
        arma.append(t)
        # Jacobi timing: first token doubles as the dataset key read back below
        open(f"data/{datafile}", "w").close()  # clear file
        subprocess.run(f"./main.out {_n}{eps}{rho_max}{method}{omega} {_n} {eps} {rho_max} {method} {omega} {datafile}".split())
        t = read_data(datafile)[f"{_n}{eps}{rho_max}{method}{omega}"].time
        jacobi.append(t)
        print(f"ρ: {round(_n,3)}/{n[-1]}, {round(100*(_n-n[0])/(n[-1]-n[0]), 2)} %")
    print(f"Done in {round(time.time()- start,3)} s")
    # work in log-log space so a power law becomes a straight line
    arma = np.log10(np.asarray(arma))
    jacobi = np.log10(np.asarray(jacobi))
    n = np.log10(n)
    Aa, Ab = np.polyfit(n, arma, deg=1)
    Ja, Jb = np.polyfit(n, jacobi, deg=1)
    ax.plot(n, jacobi, "-o", ms=16, lw=8, color="red", label=f"jacobi")
    ax.plot(n, arma, "-o", ms=16, lw=8, color="gold", label=f"armadillo")
    x = np.linspace(n[0], n[-1], 1000)
    ax.plot(x, Ja * x + Jb, ms=2.5, color="red", lw=8, alpha=0.8, label=f"fit jacobi, a={round(Ja, 4)}")
    # fixed label typo: was "fir armadillo"
    ax.plot(x, Aa * x + Ab, ms=2.5, color="gold", lw=8, alpha=0.8, label=f"fit armadillo, a={round(Aa, 4)}")
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
    legend.get_frame().set_facecolor('white')
    ax.set_xlabel("Matrix size, log(n)")
    ax.set_ylabel("Time, log(s)")
    ax.set_title("Time comparison between jacobi and armadillo")
    plt.tight_layout()
def training_from_flag(flags):
    """
    Training interface: read the data, build the network, train it, then
    persist the flags together with the best validation error.

    :param flags: training flags read from command line or parameter.py
    :return: None
    """
    if flags.use_cpu_only:
        # hide all GPUs so training runs on the CPU
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # Import the data
    loaders = datareader.read_data(x_range=flags.x_range,
                                   y_range=flags.y_range,
                                   geoboundary=flags.geoboundary,
                                   batch_size=flags.batch_size,
                                   normalize_input=flags.normalize_input,
                                   data_dir=flags.data_dir,
                                   test_ratio=flags.test_ratio)
    train_loader, test_loader = loaders
    # Reset the boundary if normalized
    if flags.normalize_input:
        flags.geoboundary_norm = [-1, 1, -1, 1]
    print("Geometry boundary is set to:", flags.geoboundary)
    print("Making network now")
    ntwk = Network(Forward, flags, train_loader, test_loader)
    print("Start training now...")
    #ntwk.pretrain()
    #ntwk.load_pretrain()
    ntwk.train()
    # House keeping: write parameters into the run folder and pickle the flags
    write_flags_and_BVE(flags, ntwk.best_validation_loss, ntwk.ckpt_dir)
def transforms(n=range(1,10), rhomax = [1,10,20], eps = 12, omega = [0,1,5], method = [0,1,2], sim=False, datafile="data.dat"):
    # plots num of transformations against N on axis ax, as well as comparing to n**2 and n**2*ln(n)
    """
    Plot Jacobi rotation counts vs. matrix size per method on the global
    ``ax``, printing the fitted log-log slope for each method.
    """
    if sim:
        print("Solving ...")
        open(f"data/{datafile}", "w+").close() # clear file
        for method_ in method:
            start = time.time()
            for N in n:
                # first token doubles as the dataset key read back below
                subprocess.run(f"./main.out {N}{eps}{rhomax[method_]}{method_}{omega[method_]} {N} {eps} {rhomax[method_]} {method_} {omega[method_]} {datafile}".split())
                print(f"{round(100*(N-n[0])/(n[-1]-n[0]), 2)} %, N: {N}, eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {omega[method_]}")
            print(f"Done in {round(time.time() -start,3)} s.")
    #if datafile != "data.dat": # move content to designated file
    #subprocess.run(f"cp data.dat {datafile}".split())
    sols = read_data(datafile) #all solutions
    for method_ in method: # all methods are plotted
        trans = np.array([sols[f"{N}{eps}{rhomax[method_]}{method_}{omega[method_]}"].transformations for N in n]) # array of counted transformations
        # slope of log(transformations) vs log(n) gives the scaling exponent
        coeff, residual, rank, sv, cond = np.polyfit(np.log10(n), np.log10(trans), deg =1, full=True)
        print(f"a = {coeff[0]} +- {residual[0]}")
        color = {0:"red", 1:"blue", 2:"green"}[method_] #every method gets unique color
        label = {0:"Buckling beam", 1:r"Quantum 1, $\rho_{max} = $"+str(rhomax[method_]), 2:r"Quantum 2, $\rho_{max} = $"+f"{rhomax[method_]}, $\omega_r = {omega[method_]}$"}[method_]
        ax.plot(n, trans, "-o", markersize = 2.5, color=color, alpha=0.8, lw=2.8, label=label)
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon = True, fontsize="x-small")
    frame = legend.get_frame()
    frame.set_facecolor('white')
    ax.set_xlabel("Matrix size, $n$")
    ax.set_ylabel("Transformations, $m$")
    plt.title(r"Transformations; $m$" )
    # NOTE(review): uses the loop variable after the loop ended, so this line
    # reports only the last method -- presumably debug output.
    print(f" eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {omega[method_]}:")
def main():
    """Load the GPS drives, compute distances and overlaps, and plot both."""
    # use a context manager so the TSV handle is closed (was previously leaked)
    with open("gpsdata/all.tsv") as tsv:
        drives = dr.read_data(tsv, 1000)
    calculate_distances(drives)
    calculate_overlaps(drives, 0.01, 900)
    pl.distance_plot(drives)
    pl.max_overlap_fractions_plot(drives)
# Driver script: parse CLI flags, set up logging, load the dataset, and run
# the ego-set expansion either serially or across a process pool.
parser.add_argument('-threads', required=False, default=1)
parser.add_argument('-filename', required=False, default='baseline_egoset_nyt_state.txt', help='Filename to write')
args = parser.parse_args()
start_time_str = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
if not os.path.isdir('baseline_log'):
    os.mkdir('baseline_log')
# redirect stdout into a timestamped log file
sys.stdout = Logger("baseline_log/" + start_time_str + '_' + args.filename)
threads = int(args.threads) # Global Variable
filename = args.filename # Global Variable
data = read_data(
    args.data, args.evaluation_class, args.evaluation_query).loadall(
    ) # Global Variable -- remains unchanged inside functions.
print_info = ['\n'] * len(data.queries) # Global Variable
start = time.time()
if threads == 1:
    # serial path: expand each query in order, with a progress bar
    actual_print = []
    for seedEids in tqdm(zip(range(len(data.queries)), data.queries), total=len(data.queries)):
        actual_print.append(expand_single_query_egoset(seedEids))
else: # Parallel Processing
    pool = Pool(processes=threads, maxtasksperchild=1)
    actual_print = pool.map(expand_single_query_egoset, zip(range(len(data.queries)), data.queries))
# Few-shot drug-drug relation setup: parse hyperparameters, load the
# heterogeneous graph, split relation types, and build the datasets.
parser = argparse.ArgumentParser()
parser.add_argument('--few', required=False, default=5, type=int)
parser.add_argument('--num_query', required=False, default=10, type=int)
parser.add_argument('--batch', required=False, default=20, type=int)
parser.add_argument('--epoch', required=False, default=1000, type=int)
parser.add_argument('--patience', required=False, default=30, type=int)
parser.add_argument('--data', required=False, default='data/', type=str)
parser.add_argument('--device', required=False, default='cpu', type=str)
parser.add_argument('--log_step', required=False, default=30, type=int)
parser.add_argument('--dropout', required=False, default=0.5, type=float)
parser.add_argument('--margin', required=False, default=2.0, type=float)
parser.add_argument('--metric', required=False, default='AUPRC', type=str)
parser.add_argument('--task', required=False, default='test', type=str)
args = parser.parse_args()
HG, drugs, drugs_id, edge_types_id, drug_drug, pretrain_embed = read_data(args.data)
all_relation = sorted(list(edge_types_id.values()))
# split relation types: 10% test, then ~11.1% of the remainder for validation
_, test_rel = train_test_split(all_relation, test_size=0.1, random_state=0)
train_rel, valid_rel = train_test_split(_, test_size=0.11111, random_state=0)
train_dataset = fewshot_train_dataset(args.few, args.num_query, drugs, drugs_id, train_rel, drug_drug)
valid_dataset = fewshot_test_dataset(args.few, drugs, drugs_id, valid_rel, drug_drug)
test_dataset = fewshot_test_dataset(args.few, drugs, drugs_id, test_rel, drug_drug)
if args.task == 'train':
    train_loader = batch_loader(train_dataset, batch_size=args.batch, shuffle=True, num_worker=1)
    model = Model(pretrain_embed, args.few, in_features=100, dropout=args.dropout, device=args.device)
    model.to(args.device)
    model.train()
    loss_fn = nn.MarginRankingLoss(margin=args.margin)
# CLI entry logic: decide between GUI and console mode (and ranked vs.
# boolean retrieval) from the command-line arguments, then start the chosen UI.
rankedmode = True
if len(sys.argv) == 1:
    guimode = True
elif sys.argv[1] == 'gui':
    guimode = True
elif sys.argv[1] == 'console':
    guimode = False
    # 'bool' as third argument switches to boolean (unranked) retrieval
    if len(sys.argv) == 3 and sys.argv[2] == 'bool':
        rankedmode = False
else:
    print("ERROR: Incorrect arguments supplied")
    print("Run either 'app.py', 'app.py console' or 'app.py console bool'")
    sys.exit(2)
docs = read_data()
index = build_index(docs, from_dump=True)
query = ''
if guimode:
    gui = GUI()
else: # console mode logic
    while True:
        if query == '':
            print('Enter query: ', end='')
            query = input()
        # termination condition of console mode
        if query == '\q':
            sys.exit(0)
def __init__(self, device, pop, batches, top, trunc, mut, model_flags, maxgen):
    ' Constructor downloads parameters and allocates memory for models and data'
    # pop: population size; batches: number of data batches; top/trunc:
    # elite-truncation settings; mut: mutation power; maxgen: generation cap
    self.device = torch.device(device)
    self.num_models = pop
    self.num_batches = batches
    self.models = []
    self.train_data = []
    self.test_data = []
    self.val_data = []
    self.mut = mut
    self.max_gen = maxgen
    self.flags = model_flags
    # Set trunc threshold to integer
    if top == 0:
        self.trunc_threshold = int(trunc * pop)
    else:
        self.trunc_threshold = top
    self.elite_eval = torch.zeros(self.trunc_threshold, device=self.device)
    # timestamp for the TensorBoard run name (note: `min` shadows the builtin here)
    (y, m, d, hr, min, s, x1, x2, x3) = time.localtime(time.time())
    #self.writer = SummaryWriter("results/P{}_G{}_tr{}_{}{}{}_{}{}{}".format(pop, maxgen, self.trunc_threshold,y,m,d,
    #                                                                        hr,min,s))
    self.writer = SummaryWriter(
        "results/Iris_m{}_P{}_t{}_{}{}{}_{}{}{}".format(
            mut, pop, top, y, m, d, hr, min, s))
    'Model generation. Created on cpu, moved to gpu, ref stored on cpu'
    for i in range(pop):
        #self.models.append(Forward(model_flags).cuda(self.device))
        self.models.append(model(model_flags).cuda(self.device))
    'Data set Storage'
    train_data_loader, test_data_loader, val_data_loader = datareader.read_data(
        x_range=[i for i in range(0, 4)],
        y_range=[i for i in range(4, 7)],
        geoboundary=[20, 200, 20, 100],
        batch_size=0,
        set_size=batches,
        normalize_input=True,
        data_dir='./',
        test_ratio=0.2)
    # move every batch onto the target device once, up front
    for (geometry, spectra) in train_data_loader:
        self.train_data.append(
            (geometry.to(self.device), spectra.to(self.device)))
    for (geometry, spectra) in test_data_loader:
        self.test_data.append(
            (geometry.to(self.device), spectra.to(self.device)))
    for (geometry, spectra) in val_data_loader:
        self.val_data.append(
            (geometry.to(self.device), spectra.to(self.device)))
    ' Load in best_model.pt and start a population of its mutants '
    # disabled alternative initialisation kept for reference:
    """
    with torch.no_grad():
        rand_mut = self.collect_random_mutations()
        self.models[0] = torch.load('best_model.pt', map_location=self.device)
        self.models[0].eval()
        m_t = self.models[0]
        for i in range(1, pop):
            m = self.models[i]
            for (mp, m_tp, mut) in zip(m.parameters(), m_t.parameters(), rand_mut):
                mp.copy_(m_tp).add_(mut[i])
            m.eval()
    """
    'GPU Tensor that stores fitness values & sorts from population. Would Ideally store in gpu shared memory'
    self.fit = torch.zeros(pop, device=self.device)
    self.sorted = torch.zeros(pop, device=self.device)
    self.hist_plot = []
    self.lorentz_plot = []
from datareader import read_data
import time
import matplotlib as mpl
from scipy import optimize, stats
import pandas as pd

# Benchmark setup: read back three pre-computed N-point sweeps (one per
# problem) from datacomp.dat and collect (transformations, n, epsilon) per run.
N = 20 # remember to re-run sim when changing N
newsim = False
start = time.time()
sol = []
rhomax = [1, 5, 10]
omega = [0, 1, 0.1]
epsilon = 1e-8
# datacomp.dat holds the three sweeps back-to-back, N entries each
unsorted = list(read_data("datacomp.dat").values())
sol1 = unsorted[0:N]
sol2 = unsorted[N:2 * N]
sol3 = unsorted[2 * N:3 * N]
for sol_ in [sol1, sol2, sol3]:
    trfs = np.array([a.transformations for a in sol_])
    n = np.array([a.n for a in sol_])
    sol.append([trfs, n, epsilon])
names = [
    f"Buckling beam",
    r"Quantum 1, $\rho_{max} = $" + f"{rhomax[1]}",
    r"Quantum 2, $\rho_{max} = $" + f"{rhomax[2]}, $\omega = {omega[2]}$"
]
def analytical_comparison(n=np.arange(20, 151, 10), eps=12, rhomax=[1,5,5], omega=[0,1,[0.01, 0.5,1,5]], method=[0,1,2], sim=False, datafile="data.dat"):
    """
    Compare numerical ground-state eigenvectors against analytic solutions
    for all three problems, plotting per-run eigenvector figures and
    aggregate relative-error figures.

    NOTE(review): the "err" lists are never filled (the append is commented
    out), so the aggregate error plots below iterate over empty data.
    """
    def analytic(n, method, r, omega_r):
        # closed-form eigenvalues/eigenvector for each problem
        if method == 0:
            N = 1000
            N = n + 1  # overrides the line above
            d = 2 * N ** 2
            vals = np.asarray([d * (1 - np.cos(j * np.pi / N)) for j in range(1, n + 1)])
            vec = np.asarray([np.sin(i * np.pi / N) for i in range(1, n + 1)])
            return vals, vec
        elif method == 1:
            vals = np.array([4 * i + 3 for i in range(n)])
            return vals, np.zeros(n)
        elif method == 2:
            # harmonic-approximation around the classical minimum r0
            r0 = (2 * omega_r ** 2) ** (-1/3)
            V0 = 3/2 * (omega_r / 2) ** (2 / 3)
            we = np.sqrt(3) * omega_r
            vals = [V0 + we * (m + 0.5) for m in range(1,n+1)]
            vec = (we / np.pi) ** (1/4) * np.exp(-we / 2 * (r - r0) ** 2)
            return vals, vec
    if sim:
        print("Solving...")
        open(f"data/{datafile}", "w+").close() # clear file
        start = time.time()
        for met in method:
            # omega entry may be a scalar or a list
            W = omega[met]
            if "__iter__" not in dir(W):
                W = [W]
            for w in W:
                for _n in n:
                    # first token doubles as the dataset key read back below
                    subprocess.run(f"./main.out {_n}{eps}{rhomax[met]}{met}{w} {_n} {eps} {rhomax[met]} {met} {w} {datafile}".split())
                    print(f"{round(100*(_n-n[0])/(n[-1]-n[0]), 2)} %, N: {_n}, eps: {eps}, method: {met}, rhomax: {rhomax[met]}, omega: {w}")
        print(f"Finished in {round(time.time() -start,3)} s.")
    sols = read_data(datafile)
    error = {}
    prob = {0: "Buckling Beam", 1: "Single Electron", 2: "Double Electron"}
    for met in method:
        fig, ax = plt.subplots(1,1,dpi=175, frameon=True)
        error[met] = []
        print  # NOTE(review): bare name, a no-op -- presumably meant print()
        W = omega[met]
        if "__iter__" not in dir(W):
            W = [W]
        for w in W:
            error[met].append({"w": w, "err": []})
            for _n in n:
                data = sols[f"{_n}{eps}{rhomax[met]}{met}{w}"]
                vals = data.eigvals
                vecs = data.eigvecs
                # ground state = eigenvector of the smallest eigenvalue
                idx = np.argmin(vals)
                vec = vecs[:, idx]
                rho = np.linspace(0, data.pmax, _n)
                avals, avec = analytic(_n, met, rho, w)
                print(avec)
                print("AAAAAAAAa")
                avec /= np.linalg.norm(avec)
                # error[met][-1]["err"].append([err_val, err_vec])
                fig, ax = plt.subplots(1,1,dpi=175, frameon=True)
                ax.plot(rho, vec, "r", lw=5, label=f"Eigenvector for {prob[met]}")
                print(rho)
                print()
                print(avec)
                ax.plot(rho, avec, "k", lw=5, label=f"Analytic eigenvector")
                plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
                plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
                legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
                frame = legend.get_frame().set_facecolor('white')
                ax.set_xlabel("Matrix size, log(n)")
                ax.set_ylabel("Amplitude")
                ax.set_title(f"Eigenvector for {prob[met]}")
                plt.tight_layout()
                plt.show()
    # aggregate eigenvector error plot (skipping omega = 5)
    fig, ax = plt.subplots(1,1,dpi=175, frameon=True)
    for met in range(3):
        run = error[met]
        for err in run:
            errs = np.asarray(err["err"])
            if err["w"] in (5,):
                continue
            val, vec = errs.T
            # plt.plot(n, val, "--", lw=5, label=f"error in value, {prob[met]}, $\omega$={err['w']}")
            plt.plot(n, np.log10(vec), lw=5, label=f"{prob[met]}, $\omega$={err['w']}")
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(loc=8, fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
    frame = legend.get_frame().set_facecolor('white')
    ax.set_xlabel("Matrix size, log(n)")
    ax.set_ylabel("total relative error")
    ax.set_title(f"Total relative error in eigenvectors")
    plt.tight_layout()
    plt.show()
    # aggregate eigenvalue error plot (skipping omega = 0.01)
    fig, ax = plt.subplots(1,1,dpi=175, frameon=True)
    for met in range(3):
        run = error[met]
        for err in run:
            errs = np.asarray(err["err"])
            if err["w"] in (0.01,):
                continue
            val, vec = errs.T
            plt.plot(n, val, "--", lw=5, label=f"{prob[met]}, $\omega$={err['w']}")
            # plt.plot(n, vec, lw=5, label=f"error in vector, {prob[met]}, $\omega$={err['w']}")
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
    frame = legend.get_frame().set_facecolor('white')
    ax.set_xlabel("Matrix size, log(n)")
    ax.set_ylabel("total relative error")
    ax.set_title(f"Total relative error in eigenvalues")
    plt.tight_layout()
    plt.show()
label=f"$\omega = {omega[i]}$, norm: {round(np.linalg.norm(vecs[:, i]),3)}" if sim: print("Solving ...") open(f"data/{datafile}", "w+").close() # clear file for method_ in method: if method_ != 0: print("Error for this has not been implemented") continue start = time.time() for _n in n: subprocess.run(f"./main.out {_n}{eps}{rhomax[method_]}{method_}{omega[method_]} {_n} {eps} {rhomax[method_]} {method_} {omega[method_]} {datafile}".split()) print(f"{round(100*(_n-n[0])/(n[-1]-n[0]), 2)} %, N: {_n}, eps: {eps}, method: {method_}, rhomax: {rhomax[method_]}, omega: {omega[method_]}") print(f"Finished in {round(time.time() -start,3)} s.") sols = read_data(datafile) beam_err = [] for method_ in method: if method_ != 0: print("Error for this has not been implemented") continue for _n in n: data = sols[f"{_n}{eps}{rhomax[method_]}{method_}{omega[method_]}"] vals = data.eigvals vecs = data.eigvecs vecs, vals = mat_sort_by_array(vecs, vals) avals, avecs = analytical_beam(_n, method_) err_vals = np.log10(abs(np.sum(abs(vals - avals) / avals)) / _n) err_vecs = np.log10(abs(np.sum(abs(vecs - avecs) / avecs)) / _n / _n)
def plotEigvalsQuantum1(ax, var="n", vars=None, eigcount=10, sim=False, datafile="data.dat", eps=16, **kwargs):
    """Plot the lowest `eigcount` eigenvalues of the single-electron quantum problem.

    The eigenvalues are plotted either as a function of the matrix size n or of
    rhomax: the quantity that varies is named by `var` ("n" or "rhomax") and its
    values are passed in `vars`. The quantity that stays fixed must be supplied
    as a keyword argument, e.g. var="n", vars=range(1, 10), rhomax=20.

    Parameters
    ----------
    ax : matplotlib axis to draw on.
    var : "n" or "rhomax" — which quantity varies.
    vars : iterable of values taken by the varying quantity.
    eigcount : how many (sorted, smallest) eigenvalues to plot.
    sim : when True, run ./main.out first to (re)generate the data.
    datafile : file the solver results are read from (and copied to after a run).
    eps : tolerance exponent handed to the solver.
    **kwargs : must contain the fixed quantity ("rhomax" when var == "n",
        "n" when var == "rhomax").
    """
    # Fix the mutable-default-argument pitfall (was: vars=[]).
    vars = [] if vars is None else list(vars)
    if sim:  # run the external solver if prompted
        print("Solving ...")
        open("data.dat", "w").close()  # clear file
        start = time.time()
        if var == "n":
            for n in vars:
                # First argv token is the key the result is stored under in the datafile.
                subprocess.run(
                    f"./main.out {n}{kwargs['rhomax']} {n} {eps} {kwargs['rhomax']} 1 1"
                    .split())
                print(
                    f"N: {n}/{vars[-1]}, {round(100*(n-vars[0])/(vars[-1]-vars[0]), 2)} %"
                )
            print(f"Done in {round(time.time()- start,3)} s")
            subprocess.run(f"cp data.dat {datafile}".split())
        elif var == "rhomax":
            for rhomax in vars:
                subprocess.run(
                    f"./main.out {kwargs['n']}{rhomax} {kwargs['n']} {eps} {rhomax} 1 1"
                    .split())
                print(
                    f"ρ: {rhomax}/{vars[-1]}, {round(100*(rhomax-vars[0])/(vars[-1]-vars[0]), 2)} %"
                )
            print(f"Done in {round(time.time()- start,3)} s")
            subprocess.run(f"cp data.dat {datafile}".split())
    sols = read_data(datafile)
    # Color gradient from red to cyan across the varying values.
    colors = list(Color("red").range_to(Color("cyan"), len(vars)))
    colors = [str(color) for color in colors]
    # Analytic eigenvalues of the single-electron problem: 3, 7, 11, ...
    analytical = np.array([4 * a + 3 for a in range(0, eigcount)])
    indexes = np.arange(1, eigcount + 1)
    if var == "n":
        for i, n in enumerate(vars):
            ax.plot(indexes,
                    np.sort(sols[f"{n}{kwargs['rhomax']}"].eigvals)[:eigcount],
                    '-o',
                    markersize=1,
                    color=colors[i],
                    lw=1.5,
                    alpha=0.6)
        label = "Matrix size, $n$"
    elif var == "rhomax":
        for i, rhomax in enumerate(vars):
            ax.plot(indexes,
                    np.sort(sols[f"{kwargs['n']}{rhomax}"].eigvals)[:eigcount],
                    '-o',
                    markersize=1,
                    color=colors[i],
                    lw=1.5,
                    alpha=0.6)
        label = r"$\rho_{max}$"
    else:
        # Fail early with a clear message instead of an UnboundLocalError on
        # `label` further down (the original left `label` unassigned here).
        raise ValueError(f'var must be "n" or "rhomax", got {var!r}')
    ax.plot(indexes, analytical, label="Analytical", color="black", lw=2, alpha=0.8, linestyle="--")
    ax.set_xlabel("Index, $i$")
    ax.set_ylabel(r"$\lambda_{i}$")
    ax.set_ylim(top=analytical[-1] * 1.2, bottom=analytical[0] * 0.8)
    ax.set_xlim(1, eigcount)
    # A horizontal colorbar acts as the legend for the gradient of curves.
    cmap = mpl.colors.ListedColormap(colors)
    norm = mpl.colors.Normalize(vmin=vars[0], vmax=vars[-1])
    ax.figure.colorbar(
        mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
        ax=ax,
        orientation='horizontal',
        label=label,
        ticks=[int(a) for a in np.linspace(vars[0], vars[-1], 5)])
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=0.4, frameon=True, loc='lower right')
    legend.get_frame().set_facecolor('white')
) pmaxs = np.linspace(0.5, 10, 50) N = 50 if newsimP: open("data/dataQuantumP.dat", "w+").close() for rhomax in pmaxs: for omega in omegas: subprocess.run( f"./main.out {rhomax}{omega} {N} {eps} {rhomax} 2 {omega} {'dataQuantumP.dat'}" .split()) print( f"rho: {rhomax}/{pmaxs[-1]} ω: {round(omega,2)}, {round(100*omega/omegas[-1],2)}%" ) sols = read_data("dataQuantum2.dat") solsp = read_data("dataQuantumP.dat") Ncolors = list(Color("red").range_to(Color("cyan"), len(omegas))) Ncolors = [str(color) for color in Ncolors] with plt.style.context("seaborn-darkgrid"): f, ax = plt.subplots(1, 1, dpi=175, frameon=True) ax.set_xlabel("$N$") ax.set_ylabel("Eigval") for N in Ns: #eigvals = np.zeros(len(omegas)) for i, omega in enumerate(omegas): eigval = np.sort(sols[str(N) + str(omega)].eigvals)[0] #print(f"eig: {round(eigval,2)}, N: {N}, ω:{omega}") ax.plot(N, eigval, 'o', color=Ncolors[i])
# Script: estimate the number of correct digits in the three lowest eigenvalues
# as a function of matrix size N, and fit a logarithmic model to the trend.
pmax = 20
eps = 12
newsim = False  # set True to regenerate the data with the external solver
Nmax = 500
Nmin = 10
interval = 10
Nindexes = np.array(range(Nmin, Nmax, interval))
if newsim:
    open("data.dat", "w+").close()  # clear the solver output file
    for n in Nindexes:
        subprocess.run(f"./main.out {n} {n} {eps} {pmax} 1 1".split())
        print(f"N: {n}, {round(100*(n-Nmin)/(Nmax-Nmin), 2)} %")
    subprocess.run("cp data.dat dataFindAcc.dat".split())
# NOTE(review): a fresh simulation is copied to dataFindAcc.dat but the data is
# read from dataFindAccBIG.dat — confirm this filename mismatch is intentional
# (e.g. a pre-computed larger data set).
data = read_data("dataFindAccBIG.dat")
avals = np.array([3,7,11])  # analytic eigenvalues of the lowest three modes
print(Nindexes)
with plt.style.context("seaborn-darkgrid"):
    f, ax = plt.subplots(1, 1, dpi=100, frameon=True)
    ax.set_xlabel("Matrix size, N")
    ax.set_ylabel("No. of correct digits")
    # Mean absolute error of the three lowest eigenvalues, expressed as a
    # number of correct digits via -log10.
    vals = np.zeros(len(Nindexes))
    for i,n in enumerate(Nindexes):
        vals[i] = -np.log10(1/3*np.sum(np.abs(np.sort(data[str(n)].eigvals)[:3]-avals)))
    # NOTE(review): `f` rebinds the figure handle created above — harmless here
    # since the figure is not referenced again, but a confusing shadowing.
    f = lambda t,a,b: a*np.log10(t/b)
    # Fit only from the 10th index on (small N deviates from the asymptotic trend).
    popt, pcov = optimize.curve_fit(f, Nindexes[10:], vals[10:])
    x = np.linspace(Nindexes[10], 3000, 2000)
def plotTransforms(Ns, ax, sim=False, datafile="data.dat", eps=12, pmax=[1, 10, 20], omega=[0, 1, 5]):
    # Plots the number of similarity transformations against matrix size N on
    # axis `ax` for each of the three problems (method 0, 1, 2).
    # (The original comment also mentions comparing to n**2 and n**2*ln(n);
    # that comparison is not performed in this visible body — TODO confirm.)
    # NOTE(review): pmax/omega are mutable default arguments; benign here since
    # they are never mutated, but tuples would be safer.
    if sim:
        print("Solving ...")
        open("data.dat", "w").close()  # clear file
        for method in [0, 1, 2]:
            start = time.time()
            for N in Ns:
                # First argv token doubles as the key the solution is stored
                # under in the data file (see the lookup below).
                subprocess.run(
                    f"./main.out {N}{eps}{pmax[method]}{method}{omega[method]} {N} {eps} {pmax[method]} {method} {omega[method]}"
                    .split())
                print(
                    f"{round(100*(N-Ns[0])/(Ns[-1]-Ns[0]), 2)} %, N: {N}, eps: {eps}, method: {method}, pmax: {pmax[method]}, omega: {omega[method]}"
                )
            print(f"Done in {round(time.time() -start,3)} s.")
        if datafile != "data.dat":  # move content to designated file
            subprocess.run(f"cp data.dat {datafile}".split())
    sols = read_data(datafile)  # all solutions
    for method in [0, 1, 2]:  # all methods are plotted
        trans = np.array([
            sols[f"{N}{eps}{pmax[method]}{method}{omega[method]}"].
            transformations for N in Ns
        ])  # array of counted transformations
        color = {
            0: "red",
            1: "blue",
            2: "green"
        }[method]  # every method gets a unique color
        label = {
            0: "Buckling beam",
            1: r"Quantum 1, $\rho_{max} = $" + str(pmax[method]),
            2: r"Quantum 2, $\rho_{max} = $" + f"{pmax[method]}, $\omega_r = {omega[method]}$"
        }[method]
        ax.plot(Ns,
                trans,
                "-o",
                markersize=2.5,
                color=color,
                alpha=0.8,
                lw=2.8,
                label=label)
    plt.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
    legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
    frame = legend.get_frame()
    frame.set_facecolor('white')
    ax.set_xlabel("Matrix size, $n$")
    ax.set_ylabel("Transformations, $m$")
    # NOTE(review): `method` below is the leftover loop variable (== 2); this
    # print looks like leftover debug output — confirm placement and intent.
    print(
        f" eps: {eps}, method: {method}, pmax: {pmax[method]}, omega: {omega[method]}:"
    )
def main(N, tol):
    """Compare Armadillo and Jacobi eigensolvers against the analytical solution.

    For every matrix size n in N the problem is solved both with an external
    Armadillo program and with the Jacobi solver (./main.out); eigenvectors are
    sorted by increasing eigenvalue and the total relative error between each
    pair of methods is recorded. Two figures are then shown: the relative
    differences between methods and the solve times.

    Parameters
    ----------
    N : iterable of int — matrix sizes to run.
    tol : tolerance parameter passed to the Jacobi solver.
    """
    arma_Jac_err = []
    analy_Jac_err = []
    analy_arma_err = []
    time_arma = []
    time_Jac = []
    for n in N:
        print("n = ", n)
        # Find solution using armadillo.
        open("data/" + arma_file, "w").close()
        subprocess.run(f"./{arma_name} {n} {arma_file}".split())
        # Sort eigenvector matrix according to increasing eigenvalue.
        # (Renamed the unpacked timing value `time` -> `arma_time`: the old
        # name shadowed the stdlib `time` module inside this function.)
        arma_vals, arma_vecs, arma_time = read_arma()
        arma_vecs, arma_vals = mat_sort_by_array(arma_vecs.T, arma_vals)
        time_arma.append(arma_time)
        # Find numerical solution with the Jacobi solver.
        open("data/" + Jacobi_file, "w").close()
        subprocess.run(f"./main.out tmp {n} {tol} 1 0 0 {Jacobi_file}".split())
        # Sort eigenvector matrix according to increasing eigenvalue.
        Jacobi = read_data(Jacobi_file)["tmp"]
        Jac_vecs, Jac_vals = mat_sort_by_array(Jacobi.eigvecs, Jacobi.eigvals)
        time_Jac.append(Jacobi.time)
        analy_vec, analy_val = analytical(n)
        # Compute total relative error in each eigenvector, method vs method.
        arma_Jac_err.append(error(arma_vecs, Jac_vecs))
        analy_Jac_err.append(error(analy_vec, Jac_vecs))
        analy_arma_err.append(error(analy_vec, arma_vecs))
    with plt.style.context("seaborn-darkgrid"):
        f, ax = plt.subplots(1, 1, dpi=100, frameon=True)
        ax.plot(
            N,
            arma_Jac_err,
            "o",
            color="firebrick",
            label=r"$\delta_{rel}$ armadillo-Jacobi",
        )
        ax.plot(
            N,
            analy_Jac_err,
            "o",
            color="forestgreen",
            label=r"$\delta_{rel}$ analytical-Jacobi",
        )
        ax.plot(
            N,
            analy_arma_err,
            "o",
            color="mediumblue",
            label=r"$\delta_{rel}$ analytical-armadillo",
        )
        # Maximize the window (Tk backend) and fix an explicit size for saving.
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        fig = plt.gcf()
        fig.set_size_inches((16, 11), forward=False)
        plt.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
        plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
        legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
        frame = legend.get_frame()
        frame.set_facecolor('white')
        plt.title("Total relative difference between each method")
        plt.xlabel("N")
        plt.ylabel("relative difference")
        # Removed a trailing plt.legend(): it replaced the styled legend built
        # above with a plain default one, discarding the white frame styling.
        plt.show()
    with plt.style.context("seaborn-darkgrid"):
        f, ax = plt.subplots(1, 1, dpi=100, frameon=True)
        ax.plot(N, time_arma, "o", color="firebrick", label="armadillo")
        ax.plot(N, time_Jac, "o", color="forestgreen", label="Jacobi method")
        plt.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
        plt.tick_params(top='on', bottom='on', left='on', right='on', labelleft='on', labelbottom='on')
        legend = ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True, fontsize="x-small")
        frame = legend.get_frame()
        frame.set_facecolor('white')
        plt.title("Time of solving")
        plt.xlabel("N")
        plt.ylabel("Time, [s]")
        # Removed a trailing plt.legend() here as well, for the same reason.
        plt.show()
for n in np.linspace(100, 450, N): subprocess.run( f"./main.out {method}{n} {n} {epsilon} {rhomax[method]} {method} {omega[method]}" .split()) print(f"Method: {method}: {round(100*(n-100)/250, 2)} %") print(f"{method} done in {round(time.time() -subtime,3)} s.") print(f"All done in {round(time.time() -start,3)} s.") # solutions = read_data() # trfs = np.array([a.transformations for a in solutions]) # n = np.array([a.n for a in solutions]) # eps = np.array([a.eps for a in solutions]) # sol.append([trfs, n, eps]) # print(sol) unsorted = list(read_data().values()) #print(unsorted) sol1 = unsorted[0:N] sol2 = unsorted[N:2 * N] sol3 = unsorted[2 * N:3 * N] for sol_ in [sol1, sol2, sol3]: trfs = np.array([a.transformations for a in sol_]) n = np.array([a.n for a in sol_]) #eps = np.array([a.eps for a in sol_]) sol.append([trfs, n, epsilon]) names = [ f"Buckling beam", r"Quantum 1, $\rho_{max} = $" + f"{rhomax[1]}", r"Quantum 2, $\rho_{max} = $" + f"{rhomax[2]}, $\omega = {omega[2]}$" ]
# Script: for a set of candidate rhomax values, solve the single-electron
# quantum problem (n = 100) and tabulate the four lowest eigenvalues plus their
# total relative error against the analytic values, as a LaTeX table.
import numpy as np
import pandas as pd
from datareader import read_data
import subprocess
n = 100
eps = 12
open("data.dat", "w").close()  # clear the solver output file
rhoms = [3.613, 4.048, 4.456, 4.827]
for rhom in rhoms:
    # First argv token (rhom) is the key the result is stored under.
    subprocess.run(f"./main.out {rhom} {n} {eps} {rhom} 1 1".split())
sols = read_data()
avals = np.array([4 * i + 3 for i in range(4)])  # analytic eigenvalues 3, 7, 11, 15
# Table columns: rhomax, the four lowest numerical eigenvalues, and the
# summed relative error of those eigenvalues.
data = {
    r"$\rho_{max}$": rhoms,
    "$\lambda_1$": [],
    "$\lambda_2$": [],
    "$\lambda_3$": [],
    "$\lambda_4$": [],
    r"$E_{rel}$": []
}
for i in range(4):
    numvals = np.sort(sols[str(rhoms[i])].eigvals)
    data[r"$E_{rel}$"].append(np.sum(np.abs(numvals[:4] - avals) / avals))
    for j in range(4):
        # f-string key matches the literal "$\lambda_1$"... keys above.
        data[f"$\lambda_{j+1}$"].append(numvals[j])
df = pd.DataFrame(data)
print("\n" + df.to_latex(
    index=False,
    float_format="%.6e",
def visualize_loss(model_dir): # Retrieve the flag object if (model_dir.startswith("models")): model_dir = model_dir[7:] print("after removing prefix models/, now model_dir is:", model_dir) print("Retrieving flag object for parameters") flags = flagreader.load_flags(os.path.join("models", model_dir)) flags.eval_model = model_dir # Reset the eval mode # Get the data # train_loader, test_loader = datareader.read_data(flags) train_loader, test_loader = datareader.read_data( x_range=flags.x_range, y_range=flags.y_range, geoboundary=flags.geoboundary, batch_size=1, normalize_input=flags.normalize_input, data_dir=flags.data_dir, test_ratio=0.999, shuffle=False) print("Making network now") # Make Network ntwk = Network(Forward, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model) # Evaluation process ntwk.load() # load the model as constructed cuda = True if torch.cuda.is_available() else False if cuda: ntwk.model.cuda() ntwk.model.eval() # for param in ntwk.model.lin_w0.parameters(): # # print(param) # param.data += torch.rand(1).cuda()*0.001 # ntwk.model.lin_w0.weight.data[0] += torch.rand(1).cuda() * 0.01 # ntwk.model.lin_g.weight.data[0] += torch.rand(1).cuda() * 0.01 # weights = ntwk.model.linears[2].weight.cpu().data.numpy() # Get the weights # # Reshape the weights into a square dimension for plotting, zero padding if necessary # wmin = np.amin(np.asarray(weights.shape)) # wmax = np.amax(np.asarray(weights.shape)) # sq = int(np.floor(np.sqrt(wmin * wmax)) + 1) # diff = np.zeros((1, int(sq ** 2 - (wmin * wmax))), dtype='float64') # weights = weights.reshape((1, -1)) # weights = np.concatenate((weights, diff), axis=1) # # f = plt.figure(figsize=(10, 5)) # # c = plt.imshow(weights.reshape((sq, sq)), cmap=plt.get_cmap('viridis')) # # plt.colorbar(c, fraction=0.03) # f = plot_weights_3D(weights.reshape((sq, sq)), sq) # fig = plt.figure(figsize=[10,10]) # ax = fig.add_subplot(111) # cmp = plt.get_cmap('viridis') # c2 = 
ax.imshow(weights,aspect='auto',cmap=cmp,) # plt.colorbar(c2, fraction=0.03) # plt.grid(b=None) # plt.savefig('/home/omar/PycharmProjects/mlmOK_Pytorch/lin2.png') # # # 1D loop # loss_1D = np.empty(2*dim+1) # x = np.arange(-dim*dx, (dim+1)*dx, dx) # # ntwk.model.lin_w0.weight.data[wx2, wy2] += 5 # for i,j in enumerate(x): # print(str(i)+' of '+str(len(x))) # # print(ntwk.model.lin_w0.weight.data[2, 10]) # eval_loss = [] # # ntwk.model.lin_g.weight.data[wx, wy] += j # with torch.no_grad(): # for ind, (geometry, spectra) in enumerate(test_loader): # if cuda: # geometry = geometry.cuda() # spectra = spectra.cuda() # logit, w0, wp, g = ntwk.model(geometry) # loss = ntwk.make_custom_loss(logit, spectra[:, 12:]) # eval_loss.append(np.copy(loss.cpu().data.numpy())) # ntwk.model.lin_g.weight.data[wx, wy] -= j # eval_avg_loss = np.mean(eval_loss) # # print(str(j)) # # print(eval_avg_loss) # loss_1D[i] = eval_avg_loss ntwk.model.eval() print("Doing Evaluation on the model now") test_loss = [] with torch.no_grad(): for j, (geometry, spectra) in enumerate( ntwk.test_loader): # Loop through the eval set if cuda: geometry = geometry.cuda() spectra = spectra.cuda() # for n in [1,4,5,6,10,14,17,19,20,25]: # if j == n: # for in_geo in range(8): # geom = geometry.clone() # fig, ax = plt.subplots(1, figsize=(10, 5)) # frequency = ntwk.flags.freq_low + (ntwk.flags.freq_high - ntwk.flags.freq_low) / \ # ntwk.flags.num_spec_points * np.arange(ntwk.flags.num_spec_points) # ax.plot(frequency, spectra[0,:].cpu().data.numpy(), linewidth=5,label='Truth') # colors = ['orange','red', 'green', 'blue', 'purple'] # for k in range(5): # delta = k*0.1 # geom[0,in_geo] += delta # logit, w0, wp, g = ntwk.model(geom) # Get the output # # ax.plot(frequency,logit[0,:].cpu().data.numpy(),color=colors[k],label=str(np.round(delta,3))) # plt.xlabel("Frequency (THz)") # plt.ylabel("Transmission") # plt.legend(loc="lower left", frameon=False) # 
plt.savefig('/home/omar/PycharmProjects/mlmOK_Pytorch/Perturbation_in'+str(in_geo+1) # +'_0.1_Spectrum'+str(j)+'.png') # if n==25: # break for n in [1, 4, 5, 6, 10, 14, 17, 19, 20, 25]: if j == n: for in_geo in range(8): geom = geometry.clone() fig, ax = plt.subplots(1, figsize=(10, 5)) frequency = ntwk.flags.freq_low + (ntwk.flags.freq_high - ntwk.flags.freq_low) / \ ntwk.flags.num_spec_points * np.arange(ntwk.flags.num_spec_points) x = np.ones(ntwk.flags.num_lorentz_osc) marker_size = 14 logit, w0, wp, g = ntwk.model( geometry) # Get the output ax.plot(w0[0, :].cpu().data.numpy(), markersize=marker_size, color='orange', marker='o', fillstyle='full', linestyle='None', label='w_0') ax.plot(g[0, :].cpu().data.numpy(), markersize=marker_size, color='orange', marker='s', fillstyle='full', linestyle='None', label='g') ax.plot(wp[0, :].cpu().data.numpy(), markersize=marker_size, color='orange', marker='v', fillstyle='full', linestyle='None', label='w_p') colors = ['red', 'green', 'blue', 'purple'] for k in range(1, 5): delta = k * 0.1 geom[0, in_geo] += delta logit, w0, wp, g = ntwk.model(geom) ax.plot(w0[0, :].cpu().data.numpy(), markersize=marker_size, color=colors[k - 1], marker='o', fillstyle='full', linestyle='None') ax.plot(g[0, :].cpu().data.numpy(), markersize=marker_size, color=colors[k - 1], marker='s', fillstyle='full', linestyle='None') ax.plot(wp[0, :].cpu().data.numpy(), markersize=marker_size, color=colors[k - 1], marker='v', fillstyle='full', linestyle='None') plt.ylabel("Lorentzian parameter value") plt.legend(loc="upper left", frameon=False) plt.savefig( '/home/omar/PycharmProjects/mlmOK_Pytorch/Perturb_in' + str(in_geo + 1) + '_0.1_Params' + str(j) + '.png') if n == 25: break