def closure(f, a, u):
    """One optimization-step body for model1.

    Predicts spectral coefficients from the forcing `f`, reconstructs the
    solution and the strong-form ODE residual, sums the three loss terms,
    and runs backward when gradients are enabled.

    Returns (a_pred, u_pred, DE, loss) as a 4-tuple of tensors.
    """
    if torch.is_grad_enabled():
        optimizer1.zero_grad()
    coeff_pred = model1(f)
    coeff_true = a.reshape(N, D_out - 2)
    assert coeff_pred.shape == coeff_true.shape
    # -- reconstruct solutions from the predicted coefficients --
    sol_pred = reconstruct(N, coeff_pred, lepolys)
    sol_true = u.reshape(N, D_out)
    assert sol_pred.shape == sol_true.shape
    # -- reconstruct the ODE left-hand side (epsilon hard-coded to 1E-1) --
    residual = ODE2(1E-1, sol_pred, coeff_pred, lepolys, lepoly_x, lepoly_xx)
    forcing = f.reshape(N, D_out)
    assert residual.shape == forcing.shape
    # -- total loss: coefficient + solution + equation terms --
    total = criterion2(coeff_pred, coeff_true) + criterion1(sol_pred, sol_true) + criterion1(residual, forcing)
    if total.requires_grad:
        total.backward()
    return coeff_pred, sol_pred, residual, total
def closure(a, f, u, fn=fn):
    """Weighted multi-objective training closure.

    Computes up to four loss terms — coefficient loss (weight A), solution
    loss (U), strong-form ODE-residual loss (F) and weak-form loss (WF) —
    skipping any term whose weight is zero, then backprops the sum when
    gradients are enabled. `fn` defaults to the (possibly normalized)
    forcing captured from the enclosing scope at definition time.

    Returns (a_pred, u_pred, f_pred, loss_a, loss_u, loss_f,
             loss_wf1, loss_wf2, loss_wf3, loss).
    """
    if torch.is_grad_enabled():
        optimizer.zero_grad()
    a_pred = model(fn)
    if A != 0:
        loss_a = A*criterion_a(a_pred, a)
    else:
        loss_a = 0
    if U != 0:
        u_pred = reconstruct(a_pred, phi)
        loss_u = U*criterion_u(u_pred, u)
    else:
        u_pred, loss_u = None, 0
    if F != 0:
        # NOTE(review): if U == 0 this passes u_pred=None into ODE2 —
        # confirm ODE2 tolerates that for every EQUATION, or require U != 0
        # whenever F != 0.
        f_pred = ODE2(EPSILON, u_pred, a_pred, phi_x, phi_xx, equation=EQUATION)
        loss_f = F*criterion_f(f_pred, f)
    else:
        f_pred, loss_f = None, 0
    if WF != 0 and EQUATION != 'BurgersT':
        LHS, RHS = weak_form2(EPSILON, SHAPE, f, u_pred, a_pred, lepolys, phi, phi_x, equation=EQUATION, nbfuncs=NBFUNCS)
        # One weak-form loss per test basis function, up to three.
        if NBFUNCS == 1:
            loss_wf1, loss_wf2, loss_wf3 = WF*criterion_wf(LHS, RHS), 0, 0
        elif NBFUNCS == 2:
            loss_wf1, loss_wf2, loss_wf3 = WF*criterion_wf(LHS[:,0,0], RHS[:,0,0]), WF*criterion_wf(LHS[:,1,0], RHS[:,1,0]), 0
        elif NBFUNCS == 3:
            loss_wf1, loss_wf2, loss_wf3 = WF*criterion_wf(LHS[:,0,0], RHS[:,0,0]), WF*criterion_wf(LHS[:,1,0], RHS[:,1,0]), WF*criterion_wf(LHS[:,2,0], RHS[:,2,0])
        else:
            # Robustness: NBFUNCS outside {1,2,3} previously left these unset.
            loss_wf1, loss_wf2, loss_wf3 = 0, 0, 0
    else:
        loss_wf1, loss_wf2, loss_wf3 = 0, 0, 0
    # NET LOSS
    loss = loss_a + loss_u + loss_f + loss_wf1 + loss_wf2 + loss_wf3
    # BUG FIX: when every weight is zero, `loss` is the plain int 0 which has
    # no `.requires_grad`; guard with torch.is_tensor to avoid AttributeError.
    if torch.is_tensor(loss) and loss.requires_grad:
        loss.backward()
    return a_pred, u_pred, f_pred, loss_a, loss_u, loss_f, loss_wf1, loss_wf2, loss_wf3, loss
def closure(f, a, u):
    """Training closure using the weak form instead of the strong ODE residual.

    Returns (a_pred, u_pred, DE, loss); DE is always None in this variant,
    keeping the return shape compatible with the strong-form closure.
    """
    if torch.is_grad_enabled():
        optimizer1.zero_grad()
    coeff_pred = model1(f)
    coeff_true = a.reshape(N, D_out - 2)
    assert coeff_pred.shape == coeff_true.shape
    # Reconstruct the solution from the predicted coefficients.
    sol_pred = reconstruct(N, coeff_pred, lepolys)
    sol_true = u.reshape(N, D_out)
    assert sol_pred.shape == sol_true.shape
    # Strong-form residual is disabled here; the caller still receives the slot.
    forcing = f.reshape(N, D_out)
    residual = None
    # Weak-form constraint (epsilon fixed at 1E-1).
    lhs, rhs = weak_form1(1E-1, SHAPE, forcing, sol_pred, coeff_pred, lepolys, lepoly_x)
    wf_loss = criterion1(lhs, rhs)
    # Coefficient loss + weak-form loss.
    total = criterion1(coeff_pred, coeff_true) + wf_loss
    if total.requires_grad:
        total.backward()
    return coeff_pred, sol_pred, residual, total
def closure(f, a, u, fn=fn):
    """Validation closure: forward pass plus metric computation.

    Computes the same weighted loss terms as training (skipping zero-weight
    terms), then reports the batch-averaged relative L2 error of the
    reconstructed solution and the scalar loss rounded to 12 decimals.

    Returns (avg_l2_u, loss_value).
    """
    if torch.is_grad_enabled():
        optim.zero_grad()
    a_pred = model(fn)
    if A != 0:
        loss_a = A*criterion_a(a_pred, a)
    else:
        loss_a = 0
    if U != 0:
        u_pred = reconstruct(a_pred, phi)
        loss_u = U*criterion_u(u_pred, u)
    else:
        u_pred, loss_u = None, 0
    if F != 0:
        # NOTE(review): u_pred may be None here when U == 0 — confirm ODE2
        # handles that for every EQUATION.
        f_pred = ODE2(EPSILON, u_pred, a_pred, phi_x, phi_xx, equation=EQUATION)
        loss_f = F*criterion_f(f_pred, f)
    else:
        f_pred, loss_f = None, 0
    if WF != 0 and EQUATION != 'BurgersT':
        LHS, RHS = weak_form2(EPSILON, SHAPE, f, u_pred, a_pred, lepolys, phi, phi_x, equation=EQUATION, nbfuncs=NBFUNCS)
        loss_wf = WF*criterion_wf(LHS, RHS)
    else:
        loss_wf = 0
    loss = loss_a + loss_u + loss_f + loss_wf
    # BUG FIX: the L2 metric below always needs a reconstructed solution;
    # when U == 0 the original crashed on `None.to('cpu')`. Reconstruct it
    # explicitly for the metric instead.
    if u_pred is None:
        u_pred = reconstruct(a_pred, phi)
    u_pred = u_pred.to('cpu').detach().numpy()
    u = u.to('cpu').detach().numpy()
    # Batch-averaged relative L2 error on the solution channel.
    avg_l2_u = 0
    for i in range(BATCH_SIZE):
        avg_l2_u += relative_l2(u_pred[i,0,:], u[i,0,:])
    avg_l2_u /= BATCH_SIZE
    # BUG FIX: when every weight is zero `loss` is a plain int and has no
    # `.to()`; only move/detach when it is actually a tensor.
    if torch.is_tensor(loss):
        loss_value = float(loss.to('cpu').detach())
    else:
        loss_value = float(loss)
    return avg_l2_u, np.round(loss_value, 12)
def closure(a, f, u, fn=0):
    """Training closure driven by the solution loss plus optional weak-form
    terms; the coefficient and strong-form losses are fixed off here.

    Returns (a_pred, u_pred, f_pred, loss_a, loss_u, loss_f,
             loss_wf1, loss_wf2, loss_wf3, loss).
    """
    model.train()
    if torch.is_grad_enabled():
        optimizer.zero_grad()
    a_pred = model(fn)
    # Coefficient and strong-form residual losses are disabled in this variant.
    loss_a = 0
    f_pred, loss_f = None, 0
    u_pred = reconstruct(a_pred, phi)
    loss_u = U*criterion_u(u_pred, u)
    if WF != 0:
        LHS, RHS = weak_form2(EPSILON, SHAPE, f, u_pred, a_pred, lepolys, phi, phi_x, equation=EQUATION, nbfuncs=NBFUNCS)
        # One weak-form loss per test basis function, up to three.
        # NOTE(review): NBFUNCS outside {1,2,3} leaves these unset -> NameError.
        if NBFUNCS == 1:
            loss_wf1 = WF*criterion_wf(LHS, RHS)
            loss_wf2 = 0
            loss_wf3 = 0
        elif NBFUNCS == 2:
            loss_wf1 = WF*criterion_wf(LHS[:,0,0], RHS[:,0,0])
            loss_wf2 = WF*criterion_wf(LHS[:,1,0], RHS[:,1,0])
            loss_wf3 = 0
        elif NBFUNCS == 3:
            loss_wf1 = WF*criterion_wf(LHS[:,0,0], RHS[:,0,0])
            loss_wf2 = WF*criterion_wf(LHS[:,1,0], RHS[:,1,0])
            loss_wf3 = WF*criterion_wf(LHS[:,2,0], RHS[:,2,0])
    else:
        loss_wf1 = loss_wf2 = loss_wf3 = 0
    # NET LOSS (loss_u is always a tensor, so .requires_grad is safe)
    total = loss_a + loss_u + loss_f + loss_wf1 + loss_wf2 + loss_wf3
    if total.requires_grad:
        total.backward()
    return a_pred, u_pred, f_pred, loss_a, loss_u, loss_f, loss_wf1, loss_wf2, loss_wf3, total
def model_stats(path, kind='train', gparams=None):
    """Evaluate a saved model and dump error statistics and plots.

    Loads hyperparameters (from `gparams` or from `<path>/parameters.txt`),
    rebuilds the network, runs it over a validation DataLoader, collects
    MAE / relative-L2 / L-inf / pointwise errors for both the coefficients
    `a` and the solutions `u`, writes them to CSV, and saves histogram and
    pair plots into `path`.

    Args:
        path: directory containing `parameters.txt` and `model.pt`.
        kind: dataset split; 'train' takes the size encoded in the data
              filename, anything else evaluates on 1000 samples.
        gparams: optional pre-parsed parameter dict; parsed from disk if None.

    Returns:
        dict of per-sample error lists (MAE_a, MSE_a, MinfE_a, MAE_u, MSE_u,
        MinfE_u, PWE_a, PWE_u).
    """
    from torchsummary import summary
    red, blue, green, purple = color_scheme()
    # Plot style presets (not referenced below — presumably kept for reuse).
    TEST = {'color':red, 'marker':'o', 'linestyle':'none', 'markersize': 3}
    VAL = {'color':blue, 'marker':'o', 'linestyle':'solid', 'mfc':'none'}
    cwd = os.getcwd()
    if gparams == None:
        # Parse parameters.txt into a dict: 'key:value' per line; numeric
        # values become floats, everything else stays a string.
        os.chdir(path)
        with open("parameters.txt", 'r') as f:
            text = f.readlines()
            f.close()
        from pprint import pprint
        os.chdir(cwd)
        for i, _ in enumerate(text):
            text[i] = _.rstrip('\n')
        gparams = {}
        for i, _ in enumerate(text):
            _ = _.split(':')
            k, v = _[0], _[1]
            try:
                gparams[k] = float(v)
            except:
                gparams[k] = v
    # Select the architecture class; `model` is rebound to an instance below.
    # NOTE(review): an unrecognized model name leaves `model` unset -> NameError.
    if gparams['model'] == 'ResNet':
        model = ResNet
    elif gparams['model'] == 'NetA':
        model = NetA
    elif gparams['model'] == 'NetB':
        model = NetB
    elif gparams['model'] == 'NetC':
        model = NetC
    elif gparams['model'] == 'NetD':
        model = NetD
    EQUATION, EPSILON, INPUT = gparams['equation'], gparams['epsilon'], gparams['file']
    if kind == 'train':
        # Dataset size is encoded before the 'N' in the filename, e.g. '1000N31'.
        SIZE = int(gparams['file'].split('N')[0])
    else:
        SIZE = 1000
    FILE = f'{SIZE}N' + INPUT.split('N')[1]
    gparams['file'] = FILE
    if path != gparams['path']:
        # Run directory was moved: rewrite the 'path:' line in parameters.txt
        # so it points at the current location (relative to 'training').
        index = path.index("training")
        def replace_line(file_name, text):
            # Replace the first 'path:' line of `file_name` with `text`.
            os.chdir(path)
            lines = open(file_name, 'r').readlines()
            for i, _ in enumerate(lines):
                if 'path:' in _:
                    line_num = i
                    break
            lines[line_num] = 'path:' + text +'\n'
            out = open(file_name, 'w')
            out.writelines(lines)
            out.close()
            os.chdir(cwd)
        replace_line('parameters.txt', path[index:])
    PATH = gparams['path']
    KERNEL_SIZE = int(gparams['ks'])
    # 'same'-style padding for an odd kernel size.
    PADDING = (KERNEL_SIZE - 1)//2
    SHAPE = int(FILE.split('N')[1]) + 1
    BATCH_SIZE, D_in, Filters, D_out = SIZE, 1, int(gparams['filters']), SHAPE
    BLOCKS = int(gparams['blocks'])
    forcing = gparams['forcing']
    # If normalization stats were recorded, normalize the forcing inputs;
    # otherwise fall back to the raw dataset (EAFP on missing keys).
    try:
        mean, std = gparams['mean'], gparams['std']
        norm = True
        transform_f = transforms.Normalize(mean, std)
        lg_dataset = get_data(gparams, kind=kind, transform_f=transform_f)
    except:
        norm = False
        lg_dataset = get_data(gparams, kind=kind)
    validateloader = torch.utils.data.DataLoader(lg_dataset, batch_size=BATCH_SIZE, shuffle=True)
    # Spectral basis tensors used for solution reconstruction.
    xx, lepolys, lepoly_x, lepoly_xx, phi, phi_x, phi_xx = basis_vectors(D_out, equation=EQUATION)
    # LOAD MODEL
    try:
        device = gparams['device']
    except:
        device = get_device()
    model = model(D_in, Filters, D_out - 2, kernel_size=KERNEL_SIZE, padding=PADDING, blocks=BLOCKS).to(device)
    model.load_state_dict(torch.load(PATH + '/model.pt'))
    model.eval()
    summary(model)
    MAE_a, MSE_a, MinfE_a, MAE_u, MSE_u, MinfE_u, pwe_a, pwe_u = [], [], [], [], [], [], [], []
    for batch_idx, sample_batch in enumerate(validateloader):
        f = sample_batch['f'].to(device)
        # Feed the normalized forcing when normalization stats exist.
        if norm == True:
            fn = sample_batch['fn'].to(device)
        else:
            fn = sample_batch['f'].to(device)
        u = sample_batch['u'].to(device)
        a = sample_batch['a'].to(device)
        a_pred = model(fn)
        u_pred = reconstruct(a_pred, phi)
        # a_pred = torch.zeros(BATCH_SIZE, D_in, D_out - 2).to(device)
        # u_pred = model(fn)
        # f_pred = ODE2(EPSILON, u_pred, a_pred, phi_x, phi_xx, equation=EQUATION)
        # Placeholder: the forcing residual is not evaluated in this pass.
        f_pred = torch.zeros(BATCH_SIZE, D_in, D_out).to(device)
        a_pred = a_pred.to('cpu').detach().numpy()
        u_pred = u_pred.to('cpu').detach().numpy()
        f_pred = f_pred.to('cpu').detach().numpy()
        a = a.to('cpu').detach().numpy()
        u = u.to('cpu').detach().numpy()
        # Per-sample error metrics on coefficients (a) and solutions (u).
        for i in range(BATCH_SIZE):
            MAE_a.append(mae(a_pred[i,0,:], a[i,0,:]))
            MSE_a.append(relative_l2(a_pred[i,0,:], a[i,0,:]))
            MinfE_a.append(linf(a_pred[i,0,:], a[i,0,:]))
            MAE_u.append(mae(u_pred[i,0,:], u[i,0,:]))
            MSE_u.append(relative_l2(u_pred[i,0,:], u[i,0,:]))
            MinfE_u.append(linf(u_pred[i,0,:], u[i,0,:]))
            pwe_a.append(np.round(a_pred[i,0,:] - a[i,0,:], 9))
            pwe_u.append(np.round(u_pred[i,0,:] - u[i,0,:], 9))
    values = {
        'MAE_a': MAE_a,
        'MSE_a': MSE_a,
        'MinfE_a': MinfE_a,
        'MAE_u': MAE_u,
        'MSE_u': MSE_u,
        'MinfE_u': MinfE_u,
        'PWE_a': pwe_a,
        'PWE_u': pwe_u
    }
    df = pd.DataFrame(values)
    os.chdir(path)
    df.to_csv('out_of_sample.csv')
    # Best-effort dump of recorded training losses, if present.
    try:
        df2 = pd.DataFrame(gparams['losses'])
        df2.to_csv('losses.csv')
    except:
        pass
    # Pairwise scatter/KDE over all error columns.
    sns.pairplot(df, corner=True, diag_kind="kde", kind="reg")
    plt.savefig('confusion_matrix.pdf', bbox_inches='tight', dpi=300)
    # plt.show()
    plt.close()
    # Human-readable titles for the histogram subplots.
    rosetta = {
        'MAE_a': 'MAE',
        'MSE_a': 'Rel. $\\ell^{2}$',
        'MinfE_a': '$\\ell^{\\infty}$',
        'MAE_u': 'MAE',
        'MSE_u': 'Rel. $\\ell^{2}$',
        'MinfE_u': '$\\ell^{\\infty}$',
    }
    columns = df.columns
    # Drop the two pointwise-error columns; only scalar metrics get histograms.
    columns = columns[:-2]
    # Histograms of the coefficient errors (first three metric columns).
    plt.figure(2, figsize=(14, 4))
    plt.suptitle("Error Histograms")
    for i, col in enumerate(columns[:-3]):
        if col in ('PWE_a', 'PWE_u'):
            continue
        plt.subplot(1, 3, i+1)
        sns.distplot(df[[f'{col}']], kde=False, color=blue)
        plt.grid(alpha=0.618)
        # plt.xlabel(f'{col}')
        plt.title(rosetta[f'{col}'])
        if i == 0:
            plt.ylabel('Count')
        else:
            plt.ylabel('')
        plt.xlim(0, df[f'{col}'].max())
        plt.xticks(rotation=90)
    plt.savefig('histogram_alphas.pdf', bbox_inches='tight', dpi=300)
    # plt.show()
    plt.close(2)
    # Histograms of the solution errors (last three metric columns).
    plt.figure(3, figsize=(14, 4))
    plt.suptitle("Error Histograms")
    for i, col in enumerate(columns[-3:]):
        if col in ('PWE_a', 'PWE_u'):
            continue
        plt.subplot(1, 3, i+1)
        sns.distplot(df[[f'{col}']], kde=False, color=blue)
        plt.grid(alpha=0.618)
        # plt.xlabel(f'{col}')
        plt.title(rosetta[f'{col}'])
        if i == 0:
            plt.ylabel('Count')
        else:
            plt.ylabel('')
        plt.xlim(0, df[f'{col}'].max())
        plt.xticks(rotation=90)
    plt.savefig('histogram_solutions.pdf', bbox_inches='tight', dpi=300)
    # plt.show()
    plt.close(3)
    # A ResNet with zero blocks degenerates to a linear map; label accordingly.
    if gparams['model'] == 'ResNet' and gparams['blocks'] == 0:
        title = 'Linear'
    else:
        title = gparams['model']
    # out_of_sample(EQUATION, SHAPE, a_pred, u_pred, f_pred, sample_batch, '.', title)
    # try:
    # 	loss_plot(gparams)
    # except:
    # 	print("Could not create loss plots.")
    os.chdir(cwd)
    return values
Filters, D_out, kernel_size=KERNEL_SIZE, padding=PADDING).to(device) model.load_state_dict(torch.load('./model.pt')) model.eval() running_MAE_a, running_MAE_u, running_MSE_a, running_MSE_u, running_MinfE_a, running_MinfE_u = 0, 0, 0, 0, 0, 0 for batch_idx, sample_batch in enumerate(testloader): f = Variable(sample_batch['f']).to(device) u = Variable(sample_batch['u']).to(device) a = Variable(sample_batch['a']).to(device) a_pred = model(f) a = a.reshape(N, D_out - 2) assert a_pred.shape == a.shape u_pred = reconstruct(N, a_pred, lepolys) u = u.reshape(N, D_out) assert u_pred.shape == u.shape DE = ODE2(1E-1, u_pred, a_pred, lepolys, lepoly_x, lepoly_xx) f = f.reshape(N, D_out) # f = f[:,1:31] assert DE.shape == f.shape a_pred = a_pred.to('cpu').detach().numpy() u_pred = u_pred.to('cpu').detach().numpy() a = a.to('cpu').detach().numpy() u = u.to('cpu').detach().numpy() for i in range(N): running_MAE_a += mae(a_pred[i, :], a[i, :]) running_MSE_a += relative_l2(a_pred[i, :], a[i, :]) running_MinfE_a += relative_linf(a_pred[i, :], a[i, :]) running_MAE_u += mae(u_pred[i, :], u[i, :])