def train(model, device, params, optimizer, scheduler):
    """Train the network on a 10-D Poisson problem with exact autograd Laplacians.

    Minimizes the PDE residual mean((laplacian(u) + f)^2) over interior samples
    plus a penalized boundary-mismatch term.  Interior and boundary points are
    resampled every ``sampleStep`` steps; the relative error is appended to
    ``lossData.txt`` every ``writeStep`` steps.

    Args:
        model: network mapping (N, 10) points to (N, 1) values.
        device: torch device holding the data (model assumed on same device).
        params: hyperparameter dict (radius, batch sizes, step counts,
            penalty, area, diff, ...).
        optimizer: optimizer stepped once per training step.
        scheduler: LR scheduler stepped once per training step.
    """
    model.train()
    dim = 10
    data1 = torch.from_numpy(
        generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
    ).float().to(device)
    data1.requires_grad = True  # needed for autograd derivatives w.r.t. inputs
    data2 = torch.from_numpy(
        generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
    ).float().to(device)

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)
        model.zero_grad()

        # First derivatives du/dx_i for all coordinates at once: shape (N, 10).
        dfdx = torch.autograd.grad(
            output1, data1,
            grad_outputs=torch.ones_like(output1),
            retain_graph=True, create_graph=True, only_inputs=True)[0]

        # Laplacian: accumulate the pure second derivatives d2u/dx_i^2.
        # (Replaces ten copy-pasted autograd calls; summation order preserved.)
        laplacian = torch.zeros_like(output1)
        for i in range(dim):
            d2 = torch.autograd.grad(
                dfdx[:, i].unsqueeze(1), data1,
                grad_outputs=torch.ones_like(output1),
                retain_graph=True, create_graph=True,
                only_inputs=True)[0][:, i].unsqueeze(1)
            laplacian = laplacian + d2

        # Loss 1: interior PDE residual mean((laplacian(u) + f)^2).
        fTerm = ffun(data1).to(device)
        residual = laplacian + fTerm
        loss1 = torch.mean(residual * residual)

        # Loss 2: penalized boundary-condition mismatch.
        output2 = model(data2)
        target2 = exact(params["radius"], data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2)
                           * params["penalty"] * params["area"])
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(params["radius"], data1)
                error = errorFun(output1, target, params)
            # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
            print("Error at Step %s is %s." % (step + params["preStep"] + 1, error))
            # Context manager fixes the original file-handle leak.
            with open("lossData.txt", "a") as loss_file:
                loss_file.write(
                    str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        if step % params["sampleStep"] == params["sampleStep"] - 1:
            # Resample interior and boundary points.
            data1 = torch.from_numpy(
                generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
            ).float().to(device)
            data1.requires_grad = True
            data2 = torch.from_numpy(
                generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
            ).float().to(device)

        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 * (step + 1) // params["trainStep"]))

        loss.backward()
        optimizer.step()
        scheduler.step()
def preTrain(model, device, params, preOptimizer, preScheduler, fun):
    """Pre-train the network to fit ``fun`` on interior samples by plain L2 regression.

    Also truncates ``lossData.txt`` so that the subsequent training phase
    (which opens the file in append mode) starts from a clean log.

    Args:
        model: network mapping (N, 10) points to (N, 1) values.
        device: torch device holding the data.
        params: hyperparameter dict (radius, bodyBatch, preStep, writeStep, ...).
        preOptimizer: optimizer stepped once per pre-training step.
        preScheduler: accepted for interface symmetry; not stepped here
            (matches original behavior).
        fun: callable (radius, points) -> target values to regress onto.
    """
    model.train()
    # "w" truncates once; the handle is now properly closed (original leaked it).
    with open("lossData.txt", "w") as loss_file:
        for step in range(params["preStep"]):
            # Fresh interior sample every step.
            data = torch.from_numpy(
                generateData.sampleFromDisk10(
                    params["radius"], params["bodyBatch"])).float().to(device)
            output = model(data)
            target = fun(params["radius"], data)
            diff = output - target
            loss = torch.mean(diff * diff)

            if step % params["writeStep"] == params["writeStep"] - 1:
                with torch.no_grad():
                    ref = exact(params["radius"], data)
                    error = errorFun(output, ref, params)
                # print("Loss at Step %s is %s."%(step+1,loss.item()))
                print("Error at Step %s is %s." % (step + 1, error))
                loss_file.write(str(step + 1) + " " + str(error) + "\n")

            model.zero_grad()
            loss.backward()
            # Update the weights.
            preOptimizer.step()
def test(model, device, params):
    """Return the relative L2 error of the model estimated by Monte-Carlo quadrature.

    Samples ``numQuad`` interior points, evaluates model and exact solution,
    and returns ||u_h - u||_2 / ||u||_2.

    NOTE(review): the sample radius is hard-coded to 1 while the reference
    solution uses params["radius"] — confirm these are meant to agree.
    """
    numQuad = params["numQuad"]
    data = torch.from_numpy(
        generateData.sampleFromDisk10(1, numQuad)).float().to(device)
    # Inference only: disable autograd bookkeeping (original built an unused graph).
    with torch.no_grad():
        output = model(data)
        target = exact(params["radius"], data).to(device)
        diff = output - target
        error = math.sqrt(torch.mean(diff * diff))    # L2 norm of the error.
        ref = math.sqrt(torch.mean(target * target))  # L2 norm of the exact solution.
    return error / ref
def train(model, device, params, optimizer, scheduler):
    """Train on the 10-D Poisson problem using central finite-difference Laplacians.

    Approximates d2u/dx_i^2 by (u(x + h e_i) + u(x - h e_i) - 2 u(x)) / h^2 with
    h = params["diff"], and minimizes the PDE residual mean((laplacian(u) + f)^2)
    plus a penalized boundary-mismatch term.  Points are resampled every
    ``sampleStep`` steps; errors are appended to ``lossData.txt``.

    Args:
        model: network mapping (N, 10) points to (N, 1) values.
        device: torch device holding the data.
        params: hyperparameter dict (radius, batch sizes, diff, penalty, area, ...).
        optimizer: optimizer stepped once per training step.
        scheduler: LR scheduler stepped once per training step.
    """
    model.train()
    dim = 10
    h = params["diff"]
    data1 = torch.from_numpy(
        generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
    ).float().to(device)
    data2 = torch.from_numpy(
        generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
    ).float().to(device)
    # One row per coordinate direction: h * e_i.
    x_shift = torch.from_numpy(np.eye(dim) * h).float().to(device)

    def _shifted(points):
        # Forward (+h) and backward (-h) copies of the sample along each axis.
        # (Replaces twenty copy-pasted shift assignments.)
        return ([points + x_shift[i] for i in range(dim)],
                [points - x_shift[i] for i in range(dim)])

    shift_fwd, shift_bwd = _shifted(data1)

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)

        # Central second difference per axis, accumulated into the Laplacian
        # (left-to-right summation order matches the original).
        laplacian = torch.zeros_like(output1)
        for i in range(dim):
            laplacian = laplacian + (
                model(shift_fwd[i]) + model(shift_bwd[i]) - 2 * output1) / (h ** 2)

        model.zero_grad()

        # Loss 1: interior PDE residual mean((laplacian(u) + f)^2).
        fTerm = ffun(data1).to(device)
        residual = laplacian + fTerm
        loss1 = torch.mean(residual * residual)

        # Loss 2: penalized boundary-condition mismatch.
        output2 = model(data2)
        target2 = exact(params["radius"], data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2)
                           * params["penalty"] * params["area"])
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(params["radius"], data1)
                error = errorFun(output1, target, params)
            # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
            print("Error at Step %s is %s." % (step + params["preStep"] + 1, error))
            # Context manager fixes the original file-handle leak.
            with open("lossData.txt", "a") as loss_file:
                loss_file.write(
                    str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        if step % params["sampleStep"] == params["sampleStep"] - 1:
            # Resample interior/boundary points and rebuild the shifted copies.
            data1 = torch.from_numpy(
                generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
            ).float().to(device)
            data2 = torch.from_numpy(
                generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
            ).float().to(device)
            shift_fwd, shift_bwd = _shifted(data1)

        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 * (step + 1) // params["trainStep"]))

        loss.backward()
        optimizer.step()
        scheduler.step()
def train(model, device, params, optimizer, scheduler):
    """Train on the 10-D problem via the Deep Ritz energy with forward differences.

    Approximates du/dx_i by (u(x + h e_i) - u(x)) / h with h = params["diff"]
    and minimizes the variational energy mean(0.5 |grad u|^2 - f u) plus a
    penalized boundary-mismatch term.  Points are resampled every
    ``sampleStep`` steps; errors are appended to ``lossData.txt``.

    Args:
        model: network mapping (N, 10) points to (N, 1) values.
        device: torch device holding the data.
        params: hyperparameter dict (radius, batch sizes, diff, penalty, area, ...).
        optimizer: optimizer stepped once per training step.
        scheduler: LR scheduler stepped once per training step.
    """
    model.train()
    dim = 10
    h = params["diff"]
    data1 = torch.from_numpy(
        generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
    ).float().to(device)
    data2 = torch.from_numpy(
        generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
    ).float().to(device)
    # One row per coordinate direction: h * e_i.
    x_shift = torch.from_numpy(np.eye(dim) * h).float().to(device)
    # Forward-shifted copies along each axis (replaces ten copy-pasted lines).
    shifted = [data1 + x_shift[i] for i in range(dim)]

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)

        # |grad u|^2 via one-sided difference quotients along each axis
        # (left-to-right summation order matches the original).
        grad_sq = torch.zeros_like(output1)
        for i in range(dim):
            d = (model(shifted[i]) - output1) / h
            grad_sq = grad_sq + d * d

        model.zero_grad()

        # Loss 1: Deep Ritz variational energy.
        fTerm = ffun(data1).to(device)
        loss1 = torch.mean(0.5 * grad_sq - fTerm * output1)

        # Loss 2: penalized boundary-condition mismatch.
        output2 = model(data2)
        target2 = exact(params["radius"], data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2)
                           * params["penalty"] * params["area"])
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(params["radius"], data1)
                error = errorFun(output1, target, params)
            # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
            print("Error at Step %s is %s." % (step + params["preStep"] + 1, error))
            # Context manager fixes the original file-handle leak.
            with open("lossData.txt", "a") as loss_file:
                loss_file.write(
                    str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        if step % params["sampleStep"] == params["sampleStep"] - 1:
            # Resample interior/boundary points and rebuild the shifted copies.
            data1 = torch.from_numpy(
                generateData.sampleFromDisk10(params["radius"], params["bodyBatch"])
            ).float().to(device)
            data2 = torch.from_numpy(
                generateData.sampleFromSurface10(params["radius"], params["bdryBatch"])
            ).float().to(device)
            shifted = [data1 + x_shift[i] for i in range(dim)]

        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 * (step + 1) // params["trainStep"]))

        loss.backward()
        optimizer.step()
        scheduler.step()