def CalculateSolution(Pe):
    # Create the TFC class:
    N = 200
    m = 190
    nC = 2
    tfc = utfc(N, nC, m, basis='LeP', x0=0., xf=1.)
    x = tfc.x

    # Get the Legendre polynomial basis
    H = tfc.H
    H0 = H(np.array([0.]))
    Hf = H(np.array([1.]))

    # Create the constrained expression and its derivatives
    y = lambda x, xi: np.dot(H(x), xi) + (1. - x) * (1. - np.dot(H0, xi)) - x * np.dot(Hf, xi)
    yd = egrad(y)
    ydd = egrad(yd)

    L = lambda xi: ydd(x, xi) - Pe * yd(x, xi)

    # Calculate the solution
    zXi = np.zeros(H(x).shape[1])
    xi, it = NLLS(zXi, L)

    # Create the test set:
    N = 1000
    xTest = np.linspace(0., 1., N)

    err = np.abs(y(xTest, xi) - soln(xTest, Pe))
    return np.max(err), np.mean(err)
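# Usage sketch (assumption, not part of the original script): CalculateSolution
# references a helper `soln(x, Pe)` that is not defined in this listing, and the
# imports below are the usual ones these scripts rely on. For the residual
# L = y'' - Pe*y' with the boundary values y(0) = 1 and y(1) = 0 built into the
# constrained expression above, a numerically stable analytical solution is
#   y(x) = (1 - exp(Pe*(x - 1))) / (1 - exp(-Pe)),
# so a hypothetical stand-in and a small driver could look like this.
import numpy as onp
import jax.numpy as np
from tfc import utfc
from tfc.utils import egrad, NLLS

def soln(x, Pe):
    # Hypothetical stand-in for the analytical solution used by CalculateSolution.
    return (1. - np.exp(Pe * (x - 1.))) / (1. - np.exp(-Pe))

for Pe in (10., 100.):
    maxErr, meanErr = CalculateSolution(Pe)
    print(f"Pe = {Pe:7.1f}  max error = {maxErr:.3e}  mean error = {meanErr:.3e}")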
def BVP_tfc(N, m, basis, iterMax, tol):
    ## Unpack parameters: *********************************************************
    x0 = 0.
    xf = np.pi

    ## Boundary conditions: *******************************************************
    y0 = 0.
    yf = 0.

    nC = 2  # number of constraints

    ## Determine the basis scaling constant c for the tfc class (must be 1 for ELMs)
    if basis == 'CP' or basis == 'LeP':
        c = 2. / (xf - x0)
    elif basis == 'FS':
        c = 2. * np.pi / (xf - x0)
    else:
        c = 1. / (xf - x0)

    ## Compute true solution
    ytrue = lambda x: np.exp(-x) * np.sin(x)

    err = onp.ones_like(m) * np.nan
    res = onp.ones_like(m) * np.nan

    ## GET BASIS FUNCTION VALUES: ****************************************
    tfc = utfc(N, nC, int(m), basis=basis, x0=x0, xf=xf)
    x = tfc.x

    H = tfc.H
    H0 = H(tfc.x[0])
    Hf = H(tfc.x[-1])

    ## DEFINE THE ASSUMED SOLUTION: *************************************
    phi1 = lambda x: (np.pi - x) / np.pi
    phi2 = lambda x: x / np.pi

    f = lambda x: np.exp(-2. * x) * np.sin(x) * (np.cos(x) - np.sin(x)) - 2. * np.exp(-x) * np.cos(x)

    y = lambda x, xi: np.dot(H(x), xi) + phi1(x) * (y0 - np.dot(H0, xi)) + phi2(x) * (yf - np.dot(Hf, xi))
    yp = egrad(y)
    ypp = egrad(yp)

    ## DEFINE LOSS AND JACOBIAN *****************************************
    L = lambda xi: ypp(x, xi) + y(x, xi) * yp(x, xi) - f(x)

    ## SOLVE THE SYSTEM *************************************************
    xi = onp.zeros(H(x).shape[1])
    xi, _, time = NLLS(xi, L, timer=True, maxIter=iterMax)

    ## COMPUTE ERROR AND RESIDUAL ***************************************
    err = onp.linalg.norm(y(x, xi) - ytrue(x))
    res = onp.linalg.norm(L(xi))

    return err, res, time
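# Usage sketch (assumption): a minimal driver for BVP_tfc comparing basis choices.
# The argument values below are illustrative, not taken from the original study;
# the function body assumes `np` (jax.numpy), `onp` (numpy), `utfc`, `egrad`, and
# `NLLS` are already in scope as in the sketch above.
for basis in ('CP', 'LeP', 'FS'):
    err, res, time = BVP_tfc(N=100, m=60, basis=basis, iterMax=50, tol=1e-13)
    print(f"{basis:>3s}: ||error|| = {err:.3e}, ||residual|| = {res:.3e}, time = {time * 1000:.2f} ms")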
def IVP2BVP(N, m, gamma, basis, iterMax, tol):
    ## Unpack parameters: *********************************************************
    x0 = -1.
    xf = 1.

    ## Initial/boundary conditions: ***********************************************
    y0 = -2.
    y0p = -2.
    yf = 2.

    nC = 2  # number of constraints

    ## Determine the basis scaling constant c for the tfc class (must be 1 for ELMs)
    if basis == 'CP' or basis == 'LeP':
        c = 2. / (xf - x0)
    elif basis == 'FS':
        c = 2. * np.pi / (xf - x0)
    else:
        c = 1. / (xf - x0)

    ## GET BASIS FUNCTION VALUES: ****************************************
    tfc = utfc(N, nC, m, basis=basis, x0=-1., xf=1.)
    x = tfc.x

    H = tfc.H
    dH = tfc.dH

    H0 = H(tfc.z[0])
    Hf = H(tfc.z[-1])
    H0p = dH(tfc.z[0])

    ## DEFINE THE ASSUMED SOLUTION: *************************************
    phi1 = lambda a: 1. / (1. + 4. * gamma - gamma**2) * ((1. + gamma) - 2. * gamma * a)
    phi2 = lambda a: 1. / (1. + 4. * gamma - gamma**2) * ((1. - gamma)**2 + (1. - gamma**2) * a)
    phi3 = lambda a: 1. / (1. + 4. * gamma - gamma**2) * (-gamma * (gamma - 3.) + 2. * gamma * a)

    y = lambda x, xi: np.dot(H(x), xi) + phi1(x) * (y0 - np.dot(H0, xi)) \
                                       + phi2(x) * (y0p - np.dot(H0p, xi)) \
                                       + phi3(x) * (yf - np.dot(Hf, xi))
    yp = egrad(y, 0)
    ypp = egrad(yp, 0)

    ## DEFINE LOSS AND JACOBIAN *****************************************
    L = lambda xi: ypp(x, xi) + (np.cos(3. * x**2) - 3. * x + 1.) * yp(x, xi) \
                              + (6. * np.sin(4. * x**2) - np.exp(np.cos(3. * x))) * y(x, xi) \
                              - 2. * (1. - np.sin(3. * x)) * (3. * x - np.pi) / (4. - x)

    ## SOLVE THE SYSTEM *************************************************
    xi = onp.zeros(H(x).shape[1])
    xi, _, _ = NLLS(xi, L, timer=True)

    return y(x, xi), L(xi), x
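# Usage sketch (assumption): sweep the morphing parameter gamma from 0 to 1 and
# report the largest residual of the returned loss vector. The specific N, m,
# and gamma values are illustrative, not prescribed by the function above.
for gamma in (0., 0.25, 0.5, 0.75, 1.):
    y, res, x = IVP2BVP(N=100, m=60, gamma=gamma, basis='CP', iterMax=50, tol=1e-13)
    print(f"gamma = {gamma:.2f}: max |L| = {np.max(np.abs(res)):.3e}")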
def test_ODE():
    # This test solves the nonlinear differential equation
    # y'' + f(t)*y*y' = f2(t)

    # Constants used in the differential equation:
    f = lambda t: np.ones(t.shape)
    f2 = lambda t: np.exp(-2. * t) * np.sin(t) * (np.cos(t) - np.sin(t)) - 2. * np.exp(-t) * np.cos(t)

    ti = 0.
    tf = np.pi

    yi = 0.
    yf = 0.

    # Real analytical solution:
    real = lambda t: np.exp(-t) * np.sin(t)

    # Create the TFC class:
    N = 100
    m = 30
    nC = 2
    tfc = TFC(N, nC, m, x0=ti, xf=tf, basis='LeP')
    t = tfc.x

    # Get the Legendre polynomial basis
    H = tfc.H
    dH = tfc.dH

    Zero = np.zeros_like(t)
    End = tf * np.ones_like(t)
    H0 = H(Zero)
    Hd0 = dH(Zero)
    Hf = H(End)

    # Create the constrained expression and its derivatives
    beta0 = lambda t: (t - tf) / (ti - tf)
    beta1 = lambda t: (ti - t) / (ti - tf)

    y = lambda t, xi: np.dot(H(t), xi) + beta0(t) * (yi - np.dot(H0, xi)) + beta1(t) * (yf - np.dot(Hf, xi))
    yd = egrad(y)
    ydd = egrad(yd)

    # Create the residual and Jacobians
    r = lambda xi, t: ydd(t, xi) + f(t) * y(t, xi) * yd(t, xi) - f2(t)

    xi = np.zeros(H(t).shape[1])
    xi, it = NLLS(xi, r, t, constant_arg_nums=[1])

    assert np.max(np.abs(r(xi, t))) < 1e-10
def test_PDE():
    ## TFC parameters
    maxIter = 10
    tol = 1e-13

    # Constants and switches:
    n = 20
    m = 20
    x0 = np.array([0., 0.])
    xf = np.array([1., 1.])

    # Real analytical solution:
    real = lambda x, y: y**2 * np.sin(np.pi * x)

    # Create the TFC class:
    N = np.array([n, n])
    nC = np.array([2, 2])
    tfc = TFC(N, nC, m, x0=x0, xf=xf, dim=2, basis='CP')
    x = tfc.x

    Zero = np.zeros_like(x[0])
    One = np.ones_like(x[0])

    # Get the basis functions
    H = tfc.H
    Hy = tfc.Hy

    z1 = lambda xi, *x: np.dot(H(*x), xi) - (1. - x[0]) * np.dot(H(*(Zero, x[1])), xi) - x[0] * np.dot(H(*(One, x[1])), xi)
    z = lambda xi, *x: z1(xi, *x) - z1(xi, x[0], Zero) + x[1] * (2. * np.sin(np.pi * x[0]) - egrad(z1, 2)(xi, x[0], One))

    # Create the residual
    zxx = egrad(egrad(z, 1), 1)
    zyy = egrad(egrad(z, 2), 2)
    zy = egrad(z, 2)

    r = lambda xi, *x: zxx(xi, *x) + zyy(xi, *x) + z(xi, *x) * zy(xi, *x) - np.sin(np.pi * x[0]) * (2. - np.pi**2 * x[1]**2 + 2. * x[1]**3 * np.sin(np.pi * x[0]))

    xi = np.zeros(H(*x).shape[1])
    xi, it = NLLS(xi, r, *x, constant_arg_nums=[1, 2])

    zr = real(x[0], x[1])
    ze = z(xi, *x)
    err = zr - ze
    maxErr = np.max(np.abs(err))

    assert maxErr < 1e-10
                                            - f(xs2)
L = jit(lambda xi: np.hstack((L1(xi), L2(xi))))

## SOLVE THE SYSTEM *************************************************
xi1 = onp.zeros(Hs1(xs1).shape[1])
xi2 = onp.zeros(Hs2(xs2).shape[1])

m = (yf - y0) / (xf - x0)
y1 = onp.ones(1) * (m * xs1[-1] + y0)
y1d = onp.ones(1) * m

xi0 = TFCDict({'xi1': xi1, 'xi2': xi2, 'y1': y1, 'y1d': y1d})
xi = TFCDict({'xi1': xi1, 'xi2': xi2, 'y1': y1, 'y1d': y1d})

xi, it, time = NLLS(xi, L, timer=True)

## COMPUTE ERROR AND RESIDUAL ***************************************
x = np.hstack((xs1, xs2))
yinit = np.hstack((ys1(xs1, xi0), ys2(xs2, xi0)))
y = np.hstack((ys1(xs1, xi), ys2(xs2, xi)))
yp = np.hstack((yps1(xs1, xi), yps2(xs2, xi)))

err = onp.abs(y - ytrue(x))
res = onp.abs(L(xi))

print()
print('Max Error: ' + str(np.max(err)))
print('Max Loss: ' + str(np.max(res)))
print('Computation time [ms]: ' + str(time * 1000))
Lu = lambda z, xi: -xi['b']**2 * up(z, xi['xi_u']) - beta * x(z, xi['xi_x']) + alfa * u(z, xi['xi_u'])

H = lambda z, xi: 0.5 * x(z, xi['xi_x'])**2 - 0.5 * u(z, xi['xi_u'])**2 - alfa / beta * x(z, xi['xi_x']) * u(z, xi['xi_u'])
Lf = lambda z, xi: H(z, xi)[-1]

L = lambda xi: np.hstack((Lx(z, xi), Lu(z, xi), H(z, xi)))

xi_x = onp.zeros(Hx(z).shape[1])
xi_u = onp.zeros(Hu(z).shape[1])
b = onp.ones(1) * np.sqrt(2.)
xi = TFCDict({'xi_x': xi_x, 'xi_u': xi_u, 'b': b})

## SOLVE THE SYSTEM *************************************************
xi, it, time = NLLS(xi, L, timer=True, tol=tol, maxIter=iterMax)

t = (z - z[0]) / xi['b']**2
X = x(z, xi['xi_x'])
U = u(z, xi['xi_u'])

Ham = onp.zeros(len(t))
intg = onp.zeros(len(t))
for i in range(0, len(t)):
    intg[i] = 0.5 * (X[i]**2 + U[i]**2)
    Ham[i] = intg[i] - U[i] / beta * (alfa * X[i] + beta * U[i])

cost = simps(intg, t)
tf = 2. / xi['b']**2
# Create constrained expression:
g = lambda th, xi: np.dot(H(th), xi)
r = lambda th, xi: g(th, xi) \
    + (th - thf) / (th0 - thf) * (r0 - g(th0 * np.ones_like(th), xi)) \
    + (th - th0) / (thf - th0) * (rf - g(thf * np.ones_like(th), xi))

# Create loss function:
dr = egrad(r)
d2r = egrad(dr)
L = lambda xi: -r(th, xi)**2 * (dr(th, xi) * np.tan(th) + 2. * d2r(th, xi)) \
    - np.tan(th) * dr(th, xi)**3 + 3. * r(th, xi) * dr(th, xi)**2 + r(th, xi)**3

# Solve the problem:
xi = np.zeros(H(th).shape[1])
xi, _, time = NLLS(xi, L, timer=True)

# Print out statistics:
print("Solution time: {0} seconds".format(time))

# Plot the solution and residual
p = MakePlot([r"$y$"], [r"$x$"])
p.ax[0].plot(r(th, xi) * np.sin(th), r(th, xi) * np.cos(th), "k")
p.ax[0].axis("equal")
p.ax[0].grid(True)
p.ax[0].invert_yaxis()
p.PartScreen(8, 7)
p.show()

p2 = MakePlot([r"$\theta$"], [r"$L$"])
p2.ax[0].plot(th,
ft = egrad(f, 3)

# Create the residual and Jacobian
L1 = lambda xiu, xiv, *x: fx(xiu, *x) + fy(xiv, *x)
L2 = lambda xiu, xiv, *x: rho * (ft(xiu, *x) + f(xiu, *x) * fx(xiu, *x) + f(xiv, *x) * fy(xiu, *x)) + P - mu * (f2x(xiu, *x) + f2y(xiu, *x))
L3 = lambda xiu, xiv, *x: rho * (ft(xiv, *x) + f(xiu, *x) * fx(xiv, *x) + f(xiv, *x) * fy(xiv, *x)) - mu * (f2x(xiv, *x) + f2y(xiv, *x))
L = lambda xi: np.hstack([L1(xi['xiu'], xi['xiv'], *x), L2(xi['xiu'], xi['xiv'], *x), L3(xi['xiu'], xi['xiv'], *x)])

# Calculate the xi values
M = H(*x).shape[1]
xiu = np.zeros(M)
xiv = np.zeros(M)
xi = TFCDict({'xiu': xiu, 'xiv': xiv})

if xTfc:
    xi, it, time = NLLS(xi, L, maxIter=maxIter, method='lstsq', timer=True)
else:
    xi, it, time = NLLS(xi, L, maxIter=maxIter, method='pinv', timer=True)
xiu = xi['xiu']; xiv = xi['xiv']

# Calculate u and plot for different times
n = 100
X = onp.matlib.repmat(onp.reshape(onp.linspace(0, xf[0], num=n), (n, 1)), n, 1).flatten()
Y = onp.reshape(onp.matlib.repmat(onp.reshape(onp.linspace(-Hb / 2., Hb / 2., num=n), (n, 1)), 1, n), (n**2, 1)).flatten()
xTest = onp.zeros((3, n**2 * 3))
xTest[0, :] = onp.hstack([X, ] * 3)
xTest[1, :] = onp.hstack([Y, ] * 3)
xTest[2, :] = onp.hstack([onp.ones(n**2) * 0.01, onp.ones(n**2) * 0.1, onp.ones(n**2) * tf])

p = []; U = []; vals = [0.01, 0.1, tf]
def runLaneEmden(N, m, basis, k, xf):
    ## user defined parameters: ********************************************************************
    # N     - number of discretization points
    # m     - number of basis function terms
    # basis - basis function type
    # k     - specific problem type, k >= 0 (analytical solution known for k = 0, 1, and 5)

    ## problem initial conditions: *****************************************************************
    xspan = [0., xf]  # problem domain range [x0, xf]
    y0 = 1.    # y(x0)  = 1
    y0p = 0.   # y'(x0) = 0
    nC = 2     # number of constraints

    ## construct univariate tfc class: *************************************************************
    tfc = utfc(N, nC, int(m), basis=basis, x0=xspan[0], xf=xspan[1])
    x = tfc.x

    H = tfc.H
    dH = tfc.dH
    H0 = H(x[0:1])
    H0p = dH(x[0:1])

    ## define tfc constrained expression and derivatives: ******************************************
    # switching functions
    phi1 = lambda x: np.ones_like(x)
    phi2 = lambda x: x

    # tfc constrained expression
    y = lambda x, xi: np.dot(H(x), xi) + phi1(x) * (y0 - np.dot(H0, xi)) + phi2(x) * (y0p - np.dot(H0p, xi))
    yp = egrad(y)
    ypp = egrad(yp)

    ## define the loss function: *******************************************************************
    L = lambda xi: x * ypp(x, xi) + 2. * yp(x, xi) + x * y(x, xi)**k

    ## solve the problem via nonlinear least-squares ***********************************************
    xi = np.zeros(H(x).shape[1])

    # if k == 0 or k == 1, the problem is linear
    if k == 0 or k == 1:
        xi, time = LS(xi, L, timer=True)
        iter = 1
    else:
        xi, iter, time = NLLS(xi, L, timer=True)

    ## compute the error (if k = 0, 1, or 5): ******************************************************
    if k == 0:
        ytrue = 1. - 1. / 6. * x**2
    elif k == 1:
        ytrue = onp.ones_like(x)
        ytrue[1:] = np.sin(x[1:]) / x[1:]
    elif k == 5:
        ytrue = (1. + x**2 / 3)**(-1 / 2)
    else:
        ytrue = np.empty_like(x)

    err = np.abs(y(x, xi) - ytrue)

    ## compute the residual of the loss vector: ****************************************************
    res = np.abs(L(xi))

    return x, y(x, xi), err, res
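# Usage sketch (assumption): run the Lane-Emden solver for the three k values
# with known analytical solutions and report the maximum error and residual.
# The N, m, basis, and xf values are illustrative, not prescribed by the code above.
for k in (0, 1, 5):
    x, y, err, res = runLaneEmden(N=100, m=60, basis='CP', k=k, xf=1.)
    print(f"k = {k}: max error = {np.max(err):.3e}, max residual = {np.max(res):.3e}")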
def laneEmden_tfc(N, m, type, xspan, basis, iterMax, tol):
    ## Unpack parameters: *********************************************************
    x0 = xspan[0]
    xf = xspan[1]

    ## Initial conditions: ********************************************************
    y0 = 1.
    y0p = 0.

    nC = 2  # number of constraints

    ## Determine the basis scaling constant c for the tfc class (must be 1 for ELMs)
    if basis == 'CP' or basis == 'LeP':
        c = 2. / (xf - x0)
    elif basis == 'FS':
        c = 2. * np.pi / (xf - x0)
    else:
        c = 1. / (xf - x0)

    ## Compute true solution
    if type == 0:
        maxIter = 1

        def ytrue(x):
            val = onp.zeros_like(x)
            val[0] = 1.
            val[1:] = 1. - 1. / 6. * x[1:]**2
            return val
    elif type == 1:
        maxIter = 1

        def ytrue(x):
            val = onp.zeros_like(x)
            val[0] = 1.
            val[1:] = np.sin(x[1:]) / x[1:]
            return val
    elif type == 5:
        maxIter = iterMax

        def ytrue(x):
            val = onp.zeros_like(x)
            val[0] = 1.
            val[1:] = (1. + x[1:]**2 / 3)**(-1 / 2)
            return val
    else:
        maxIter = iterMax  # no analytical solution available

        def ytrue(x):
            return np.nan * np.ones_like(x)

    err = np.ones_like(m) * np.nan
    res = np.ones_like(m) * np.nan

    ## GET BASIS FUNCTION VALUES: ****************************************
    tfc = utfc(N, nC, int(m), basis=basis, x0=x0, xf=xf)
    x = tfc.x

    H = tfc.H
    dH = tfc.dH
    H0 = H(x[0])
    H0p = dH(x[0])

    ## DEFINE THE ASSUMED SOLUTION: *************************************
    phi1 = lambda x: np.ones_like(x)
    phi2 = lambda x: x

    y = lambda x, xi: np.dot(H(x), xi) \
        + phi1(x) * (y0 - np.dot(H0, xi)) \
        + phi2(x) * (y0p - np.dot(H0p, xi))
    yp = egrad(y)
    ypp = egrad(yp)

    ## DEFINE LOSS AND JACOBIAN *****************************************
    L = jit(lambda xi: x * ypp(x, xi) + 2. * yp(x, xi) + x * y(x, xi)**type)

    ## SOLVE THE SYSTEM *************************************************
    # Solve the problem
    xi = np.zeros(H(x).shape[1])
    xi, _, time = NLLS(xi, L, timer=True, maxIter=maxIter)

    ## COMPUTE ERROR AND RESIDUAL ***************************************
    err = np.linalg.norm(y(x, xi) - ytrue(x))
    res = np.linalg.norm(L(xi))

    return err, res, time
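# Usage sketch (assumption): a simple convergence study over the number of basis
# terms m for the k = 5 Lane-Emden case. The parameter values are illustrative only.
for m in (10, 20, 40, 60):
    err, res, time = laneEmden_tfc(N=100, m=m, type=5, xspan=[0., 1.], basis='CP',
                                   iterMax=50, tol=1e-13)
    print(f"m = {m:3d}: ||error|| = {err:.3e}, ||residual|| = {res:.3e}")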
def CalculateSolutionSplit(Pe):
    if Pe > 1e3:
        xpBoundL = 0. + 1.e-3
        xpBoundU = 1. - 1e-3
    else:
        xpBoundL = 0. + 1.e-1
        xpBoundU = 1. - 1e-1

    # Create the TFC class:
    N = 200
    m = 190
    nC = 3
    tfc = utfc(N, nC, m, basis='LeP', x0=-1., xf=1.)

    # Get the Legendre polynomial basis
    H = tfc.H
    dH = tfc.dH

    H0 = H(np.array([-1.]))
    Hf = H(np.array([1.]))
    Hd0 = dH(np.array([-1.]))
    Hdf = dH(np.array([1.]))

    # Create the constrained expression and its derivatives
    z = tfc.z

    xp = lambda xi: xi['xpHat'] + (xpBoundU - xi['xpHat']) * step(xi['xpHat'] - xpBoundU) + (xpBoundL - xi['xpHat']) * step(xpBoundL - xi['xpHat'])

    c1 = lambda xi: 2. / (xp(xi))
    c2 = lambda xi: 2. / (1. - xp(xi))

    x1 = lambda z, xi: (z + 1.) / c1(xi)
    x2 = lambda z, xi: (z + 1.) / c2(xi) + xp(xi)

    y1 = lambda z, xi: np.dot(H(z), xi['xi1']) + (1. - 2. * z + z**2) / 4. * (1. - np.dot(H0, xi['xi1'])) \
        + (3. + 2. * z - z**2) / 4. * (xi['y'] - np.dot(Hf, xi['xi1'])) \
        + (-1. + z**2) / 2. * (xi['yd'] / c1(xi) - np.dot(Hdf, xi['xi1']))
    ydz1 = egrad(y1, 0)
    yddz1 = egrad(ydz1, 0)
    yd1 = lambda z, xi: ydz1(z, xi) * c1(xi)
    ydd1 = lambda z, xi: yddz1(z, xi) * c1(xi)**2

    y2 = lambda z, xi: np.dot(H(z), xi['xi2']) + (3. - 2. * z - z**2) / 4. * (xi['y'] - np.dot(H0, xi['xi2'])) \
        + (1. - z**2) / 2. * (xi['yd'] / c2(xi) - np.dot(Hd0, xi['xi2'])) \
        + (1. + 2. * z + z**2) / 4. * (0. - np.dot(Hf, xi['xi2']))
    ydz2 = egrad(y2, 0)
    yddz2 = egrad(ydz2, 0)
    yd2 = lambda z, xi: ydz2(z, xi) * c2(xi)
    ydd2 = lambda z, xi: yddz2(z, xi) * c2(xi)**2

    # Solve the problem
    xi = TFCDict({
        'xi1': onp.zeros(H(z).shape[1]),
        'xi2': onp.zeros(H(z).shape[1]),
        'xpHat': onp.array([0.99]),
        'y': onp.array([0.]),
        'yd': onp.array([0.])
    })

    L1 = lambda xi: ydd1(z, xi) - Pe * yd1(z, xi)
    L2 = lambda xi: ydd2(z, xi) - Pe * yd2(z, xi)
    L = lambda xi: np.hstack([L1(xi), L2(xi)])

    xi, it = NLLS(xi, L)

    # Create the test set:
    N = 1000
    z = np.linspace(-1., 1., N)

    # Calculate the error and return the results
    X = np.hstack([x1(z, xi), x2(z, xi)])
    Y = np.hstack([y1(z, xi), y2(z, xi)])
    err = np.abs(Y - soln(X, Pe))
    return np.max(err), np.mean(err)
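# Usage sketch (assumption): compare the single-domain and split-domain
# formulations at a large Péclet number, where the true solution has a steep
# boundary layer near x = 1. This reuses the hypothetical `soln` stand-in
# assumed earlier; the chosen Pe value is illustrative only.
Pe = 1e4
maxErr1, meanErr1 = CalculateSolution(Pe)
maxErr2, meanErr2 = CalculateSolutionSplit(Pe)
print(f"single domain: max error = {maxErr1:.3e}, mean error = {meanErr1:.3e}")
print(f"split domain : max error = {maxErr2:.3e}, mean error = {meanErr2:.3e}")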
        (xFinal - x[0]) / (xFinal - xInit) * (c / alpha - c / alpha * np.tanh(c * (xInit - c * x[1]) / (2. * nu)) - np.dot(H(xInit * np.ones_like(x[0]), x[1]), xi)) + \
        (x[0] - xInit) / (xFinal - xInit) * (c / alpha - c / alpha * np.tanh(c * (xFinal - c * x[1]) / (2. * nu)) - np.dot(H(xFinal * np.ones_like(x[0]), x[1]), xi))
u = lambda xi, *x: u1(xi, *x) + \
    c / alpha - c / alpha * np.tanh(c * x[0] / (2. * nu)) - u1(xi, x[0], np.zeros_like(x[1]))

# Create the residual
ux = egrad(u, 1)
d2x = egrad(ux, 1)
ut = egrad(u, 2)
r = lambda xi: ut(xi, *x) + alpha * u(xi, *x) * ux(xi, *x) - nu * d2x(xi, *x)

# Solve the problem
xi = np.zeros(H(*x).shape[1])
xi, it, time = NLLS(xi, r, method='lstsq', timer=True, timerType="perf_counter")

# Calculate error at the test points:
dark = np.meshgrid(np.linspace(xInit, xFinal, nTest), np.linspace(0., 1., nTest))
xTest = tuple([j.flatten() for j in dark])
err = np.abs(u(xi, *xTest) - real(*xTest))

print("Training time: " + str(time))
print("Max error: " + str(np.max(err)))
print("Mean error: " + str(np.mean(err)))

# Plot analytical solution
if usePlotly: