import sys

import numpy as np
from elimtools import Gauss_elim, backsub


def newton2D_exact(f, gradf, g, gradg, x0, y0, maxit, tol, verbose):
    # Guard against starting near an inflection point
    [gradfx, gradfy] = gradf(x0, y0)
    [gradgx, gradgy] = gradg(x0, y0)
    if min(abs(gradfx), abs(gradfy), abs(gradgx), abs(gradgy)) < tol:
        print(" Attempting to start Newton iterations near inflection point; consider restarting with another initial guess")
        x0 = x0 + 1
        y0 = y0 + 1

    # 2D Newton iterations
    it = 1
    rootx = x0
    rooty = y0
    fval = f(rootx, rooty)
    gval = g(rootx, rooty)
    converged = False
    while not converged and it <= maxit:
        # Assemble the Jacobian and residual vector, then solve J*dx = -F
        [gradfx, gradfy] = gradf(rootx, rooty)
        [gradgx, gradgy] = gradg(rootx, rooty)
        A = np.array([[gradfx, gradfy], [gradgx, gradgy]])
        fvec = np.array([[fval], [gval]])
        [Amod, order] = Gauss_elim(A, -1.0 * fvec, False)
        dxvec = backsub(Amod[order, :], False)
        Areord = Amod[order, 0:2]
        detA = Areord[0, 0] * Areord[1, 1]  # determinant from the triangularized Jacobian
        if abs(detA) < 1e-6:
            print(" Ended up at a point where the Jacobian is singular, try a different starting point")
            sys.exit()
        rootx = rootx + dxvec[0]
        rooty = rooty + dxvec[1]
        fval = f(rootx, rooty)
        gval = g(rootx, rooty)
        if verbose:
            print("iteration: ", it, "x,y= ", rootx, rooty, "f,g= ", fval, gval)
            #print("det(J)= ", detA)
            #print("J= ", A)
        it = it + 1
        converged = abs(fval) < tol and abs(gval) < tol
    it = it - 1

    if not converged:
        print(" Used max number of iterations")
    return [rootx, rooty, it, converged]
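# A minimal usage sketch for newton2D_exact. The test system (x**2 + y**2 - 4 = 0,
# x - y = 0, with root at (sqrt(2), sqrt(2))) and the starting guess are chosen here
# for illustration and are not part of the routine itself; it assumes the elimtools
# Gauss_elim/backsub routines behave as used above.
if __name__ == "__main__":
    def f(x, y):
        return x**2 + y**2 - 4.0

    def gradf(x, y):
        return [2.0 * x, 2.0 * y]  # row of the Jacobian for f

    def g(x, y):
        return x - y

    def gradg(x, y):
        return [1.0, -1.0]  # row of the Jacobian for g

    [rootx, rooty, it, converged] = newton2D_exact(f, gradf, g, gradg,
                                                   1.0, 0.5, 100, 1e-9, True)
    print("Root: ", rootx, rooty, " found in ", it, " iterations; converged: ", converged)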
import time

import numpy as np
import matplotlib.pyplot as plt
from elimtools import Gauss_elim, backsub

# define a range of system sizes to investigate
nvals = np.arange(50, 550, 50)  # note that arange excludes the end of the range
testtimes = np.zeros(nvals.size)  # time taken for each system size
lrep = 1  # number of times to repeat each solve (for consistency)

# Perform the solves for each system size
for ind, n in enumerate(list(nvals)):
    A = np.random.randn(n, n)
    b = np.random.randn(n, 1)
    for irep in range(0, lrep):
        tstart = time.time()
        [Amod, order] = Gauss_elim(A, b, False)
        Amodsub = np.copy(Amod[order, :])
        x = backsub(Amodsub, False)
        tend = time.time()
        testtimes[ind] = testtimes[ind] + (tend - tstart) / lrep
    print("Solution for system of size: ", n, " took time: ", testtimes[ind])

# Plot the (average) time elapsed during each solve
plt.figure(1)
plt.plot(nvals, testtimes, 'o')
plt.xlabel("system size (no. of unknowns)")
plt.ylabel("solution time (s)")
plt.title("Performance of self-coded Gaussian elimination")
plt.show()
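# A supplementary sketch timing numpy's built-in solver on the same system sizes,
# for comparison with the self-coded routine above; np.linalg.solve calls optimized
# LAPACK routines, so it is expected to be substantially faster.
nptimes = np.zeros(nvals.size)
for ind, n in enumerate(list(nvals)):
    A = np.random.randn(n, n)
    b = np.random.randn(n, 1)
    tstart = time.time()
    x = np.linalg.solve(A, b)
    nptimes[ind] = time.time() - tstart

plt.figure(2)
plt.plot(nvals, testtimes, 'o', nvals, nptimes, 's')
plt.xlabel("system size (no. of unknowns)")
plt.ylabel("solution time (s)")
plt.legend(("self-coded GE", "numpy.linalg.solve"))
plt.show()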
""" Created on Thu Aug 20 15:40:51 2020 @author: zettergm known issues: 1) Need to control number of decimal places in output printing to improve readability """ import numpy as np from elimtools import Gauss_elim, backsub nrow = 10 ncol = 10 A = np.random.randn(nrow, ncol) b = np.random.randn(nrow, 1) # Simple test problem for debugging #A=np.array([[1.0, 4.0, 2.0], [3.0, 2.0, 1.0], [2.0, 1.0, 3.0]]) # system to be solved #b=np.array([[15.0], [10.0], [13.0]]) # RHS of system # Solve with elimtools [Awork, order] = Gauss_elim(A, b, True) x = backsub(Awork[order, :], True) print("Value of x computed via Gaussian elimination and backsubstitution: ") print(x) # Use built-in linear algebra routines to solve and compare xpyth = np.linalg.solve(A, b) print("Solution vector computed via built-in numpy routine") print(xpyth)
import numpy as np
import matplotlib.pyplot as plt
from elimtools import Gauss_elim, backsub

n = 100  # number of data points (assumed value; not defined in this fragment)
a = 1.0  # intercept, linear fn. (assumed value; not defined in this fragment)
b = 3  # slope, linear fn.
minx = -5
maxx = 5
xdata = np.linspace(minx, maxx, n)

# Generation of Gaussian random numbers in Python
dev = 5.0
mean = 0.5  # models calibration error in the measurement (an offset)
noise = dev * np.random.randn(n) + mean
ytrue = a + b * xdata
ydata = ytrue + noise

# Plot of function and noisy data
plt.figure(1)
plt.plot(xdata, ytrue, "--")
plt.plot(xdata, ydata, "o", markersize=6)
plt.xlabel("x")
plt.ylabel("y")

# Solution using least squares (normal equations solved via Gaussian elimination)
J = np.concatenate((np.reshape(np.ones(n), (n, 1)), np.reshape(xdata, (n, 1))), axis=1)
M = J.transpose() @ J
yprime = J.transpose() @ np.reshape(ydata, (n, 1))
[Mmod, order] = Gauss_elim(M, yprime, False)
avec = backsub(Mmod[order, :], False)
yfit = avec[0] + avec[1] * xdata
plt.plot(xdata, yfit, '-')
plt.legend(("original function", "noisy data", "fitted function"))
plt.show()
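# As a cross-check, a sketch using numpy's built-in least-squares routine on the same
# design matrix J; it should recover essentially the same intercept and slope as the
# normal-equations solution above.
avec_np = np.linalg.lstsq(J, np.reshape(ydata, (n, 1)), rcond=None)[0].flatten()
print("Normal-equations fit (intercept, slope): ", avec[0], avec[1])
print("numpy.linalg.lstsq fit (intercept, slope): ", avec_np[0], avec_np[1])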
import numpy as np
import matplotlib.pyplot as plt
from elimtools import Gauss_elim, backsub

# Data for a direct polynomial fit
#x = np.array([1, 2, 3, 4])
#y = 2 * x**3 - 3 * x**2 + 4 * x + 9
x = np.array([1, 2, 3, 4, 5, 6])
y = x**5 - 2 * x**4 + 2 * x**3 - 3 * x**2 + 4 * x + 9

# Create a plot to show results
plt.figure(1)
plt.plot(x, y, '*', markersize=20)
plt.xlabel("x")
plt.ylabel("y")
plt.title("Illustration of direct fit methods")

# Fit using Python functions (numpy.polyfit)
n = x.size - 1  # degree of polynomial to be fitted, computed from data point array size
coeffs = np.polyfit(x, y, n)
xlarge = np.linspace(x.min(), x.max(), 24)
polyfun = np.poly1d(coeffs)
plt.plot(xlarge, polyfun(xlarge), '--')

# Execute direct fit using self-coded Gaussian elimination
A = np.zeros((n + 1, n + 1))
for icol in range(n, -1, -1):  # columns ordered from highest power down, as poly1d expects
    newcol = x**icol
    A[:, n - icol] = newcol
[Amod, order] = Gauss_elim(A, y.reshape(-1, 1), False)  # note conversion of y to a column vector
coeffsGE = backsub(Amod[order, :], False)
polyfunGE = np.poly1d(coeffsGE)
plt.plot(xlarge, polyfunGE(xlarge), '.')
plt.legend(("original data", "built-in fit", "manual GE fit"))
plt.show()
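# A supplementary note, as a sketch: numpy's vander builds the same Vandermonde design
# matrix as the manual loop above, with columns ordered from the highest power down,
# so the two constructions can be checked against each other.
Avander = np.vander(x, n + 1)
print("Manual and np.vander design matrices agree: ", np.allclose(A, Avander))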