def test_single_values(self):
    # Test with ints
    a = np.ndarray((1, 1))
    a[0, 0] = 2
    b = np.ndarray((1, 1))
    b[0, 0] = 10
    x = gauss_elimination(a, b)
    self.assertAlmostEqual(x[0, 0], 5, msg='Failed for scalar values')

    # Test with float result
    a[0, 0] = 2
    b[0, 0] = 3
    x = gauss_elimination(a, b)
    self.assertAlmostEqual(x[0, 0], 1.5, msg='Failed for scalar values')
def test_nominal_01(self):
    # Given
    A = np.array([[ 3,  2, -3, 1, 6],
                  [ 6,  2,  4, 0, 5],
                  [-3,  1,  0, 2, 3],
                  [ 5, -8,  1, 2, 6],
                  [ 5, -8,  1, 4, 6]],
                 dtype=np.float64)
    B = np.array([[-24,  5],
                  [ -6,  3],
                  [ -9,  8],
                  [ 24,  2],
                  [ 36, 12]],
                 dtype=np.float64)

    # True solution
    xtru = np.linalg.solve(A, B)

    # Computed solution
    [x_soln, A_aug] = gauss_elimination(A, B, True)

    # Check x solution
    precision = 1e-12
    for i in range(0, xtru.shape[0]):
        for j in range(0, xtru.shape[1]):
            assert abs(xtru[i, j] - x_soln[i, j]) < precision, 'Wrong solution'

    # Check that the triangular matrix is returned correctly
    for i in range(0, A_aug.shape[0]):
        for j in range(0, i):
            assert not A_aug[i, j], 'Non-zero element in lower triangular area'
def test_gauss_elimination_00(self):
    n = 2
    mat_a = m.get_random_mat(n, n)
    vec_x = m.get_random_vector(n)
    vec_b = m.mul_mat_vec(mat_a, vec_x)
    vec_x_sol = ge.gauss_elimination(mat_a, vec_b)
    del vec_x_sol
    del vec_x
    del vec_b
    del mat_a[:]
    del mat_a
def test_gauss_elimination_01(self):
    for n in range(3, 10):
        mat_a = m.get_random_mat(n, n)
        vec_x = m.get_random_vector(n)
        vec_b = m.mul_mat_vec(mat_a, vec_x)
        vec_x_sol = ge.gauss_elimination(mat_a, vec_b)
        self.assertSequenceAlmostEqual(vec_x, vec_x_sol)
        del mat_a[:]
        del mat_a
        del vec_x_sol
        del vec_x
        del vec_b
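# Note: unittest.TestCase does not provide assertSequenceAlmostEqual, so the
# test class presumably defines its own helper. A minimal sketch of what such
# a helper might look like is given below; the mixin name and the default
# tolerance are assumptions, not the project's actual implementation.
import unittest

class SequenceAssertionsMixin(unittest.TestCase):
    def assertSequenceAlmostEqual(self, expected, actual, places=7):
        # Compare two sequences element-wise within a decimal-place tolerance
        self.assertEqual(len(expected), len(actual),
                         'Sequences have different lengths')
        for e, a in zip(expected, actual):
            self.assertAlmostEqual(e, a, places=places)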
def test_nominal_02(self):
    # Given
    A = np.array([[1, 1, 1],
                  [2, 2, 1],
                  [3, 4, 2]])
    B = np.array([[1],
                  [2],
                  [2]])

    # True solution
    xtru = np.linalg.solve(A, B)

    # Computed solution
    [x_soln, A_aug] = gauss_elimination(A, B, True)

    # Check x solution
    precision = 1e-12
    for i in range(0, xtru.shape[0]):
        for j in range(0, xtru.shape[1]):
            assert abs(xtru[i, j] - x_soln[i, j]) < precision, 'Wrong solution'

    # Check that the triangular matrix is returned correctly
    for i in range(0, A_aug.shape[0]):
        for j in range(0, i):
            assert not A_aug[i, j], 'Non-zero element in lower triangular area'
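# The tests above assume a gauss_elimination(A, B, return_aug=False) routine
# that solves A x = B and, when the third argument is True, also returns the
# row-reduced (upper-triangular) coefficient matrix. A minimal sketch of such
# a routine is given below, assuming partial pivoting; the function name,
# 'return_aug' parameter, and pivoting strategy are illustrative assumptions,
# not the project's actual implementation.
def gauss_elimination_sketch(A, B, return_aug=False):
    A = np.array(A, dtype=np.float64, copy=True)
    B = np.array(B, dtype=np.float64, copy=True)
    if B.ndim == 1:
        B = B.reshape(-1, 1)
    n = A.shape[0]

    # Forward elimination with partial pivoting
    for k in range(n - 1):
        p = k + np.argmax(np.abs(A[k:, k]))  # pivot row
        if p != k:
            A[[k, p], :] = A[[p, k], :]
            B[[k, p], :] = B[[p, k], :]
        for i in range(k + 1, n):
            factor = A[i, k] / A[k, k]
            A[i, k:] -= factor * A[k, k:]
            B[i, :] -= factor * B[k, :]
            A[i, k] = 0.0  # force exact zeros in the lower-triangular area

    # Back substitution
    x = np.zeros(B.shape, dtype=np.float64)
    for i in range(n - 1, -1, -1):
        x[i, :] = (B[i, :] - A[i, i + 1:] @ x[i + 1:, :]) / A[i, i]

    return [x, A] if return_aug else x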
import numpy as np
import scipy.linalg as spla
# gauss_elimination is expected to be importable from the project's own module


def finite_difference(yhist, h, n=1, customge=False):
    '''
    Forward, Central, Backwards finite difference calculation of derivative

    This function uses the Central Finite Difference method to compute the
    derivative for the given data vector. At the ends of the array, central
    difference doesn't work, so the forward difference and backwards
    difference methods are used instead. Note that this function decides for
    you to use the forward and backwards differencing functions at either end
    of the dataset. This cannot be turned off or changed.

    @arg
    yhist    - M x N numpy.ndarray
               Function value time history, where M is the number of
               different signals to take the derivative of (usually 1) and N
               is the number of samples in the time history.
    h        - double
               Time step
    n        - int (optional)
               Order of finite difference
    customge - bool (optional)
               Specify to use the custom gauss elimination function. True to
               use that function, False to use scipy's built-in solver.
               False by default.

    @return
    ydothist - M x N numpy.ndarray
               Finite difference derivative time history

    @dependencies
    python 3.6.0
    numpy
    scipy

    @author: Matt Marti
    @date: 2019-06-14
    '''

    # Raise Type Errors on bad input
    if type(yhist) != np.ndarray:
        raise TypeError("Argument 'yhist' is not of type numpy.ndarray")
    if type(h) not in [int, float]:
        raise TypeError("Argument 'h' is not of type [int, float]")
    elif type(h) == int:
        h = h + 0.0
    if type(n) != int:
        raise TypeError("Argument 'n' is not of type int")

    # Check yhist shape
    assert n > 0, "Argument 'n' is not greater than zero"
    if yhist.shape.__len__() == 1:
        M = 1
        yhistold = yhist.copy()
        yhist = np.ndarray((1, yhistold.shape[0]))
        for i in range(0, yhistold.shape[0]):
            yhist[0, i] = yhistold[i]
    else:
        M = yhist.shape[0]
    N = yhist.shape[1]
    assert N >= n + 1, 'Not enough data points for given order'

    # Preallocate output
    ydothist = np.zeros(yhist.shape, np.float64)
    hmat = np.zeros((n, n), np.float64)

    # Forward difference method
    for i in range(0, n):

        # Delta t matrix
        for j in range(1, n + 1):
            d = 1.0
            hj = h * j
            for k in range(1, n + 1):
                d = d * k
                hmat[j - 1, k - 1] = (hj**k) / d

        # Forward finite difference
        k = 0
        yfvec = np.ndarray((M, n))
        for j in range(i + 1, i + n + 1):
            yfvec[:, k] = yhist[:, j] - yhist[:, i]
            k += 1

        # Compute derivative
        if customge:
            ydoti = gauss_elimination(hmat, yfvec.transpose())
        else:
            ydoti = spla.solve(hmat, yfvec.transpose())

        # Assign output
        for j in range(ydothist.shape[0]):
            ydothist[j, i] = ydoti[0, j]

    # Central difference method
    for i in range(n, N - n):

        # Delta t matrix
        for j in range(1, n + 1):
            d = 1.0
            hj = h * j
            for k in range(1, n + 1):
                d = d * k
                hmat[j - 1, k - 1] = (hj**k) / d

        # Forward finite difference
        k = 0
        yfvec = np.ndarray((M, n))
        for j in range(i + 1, i + n + 1):
            yfvec[:, k] = yhist[:, j] - yhist[:, i]
            k += 1

        # Backward finite difference
        k = 0
        ybvec = np.ndarray((M, n))
        for j in range(i - 1, i - n - 1, -1):
            ybvec[:, k] = yhist[:, i] - yhist[:, j]
            k += 1

        # Compute derivative as the average of forward and backward estimates
        if customge:
            ydoti_f = gauss_elimination(hmat, yfvec.transpose())
            ydoti_b = gauss_elimination(hmat, ybvec.transpose())
        else:
            ydoti_f = spla.solve(hmat, yfvec.transpose())
            ydoti_b = spla.solve(hmat, ybvec.transpose())
        ydoti = 0.5 * (ydoti_f + ydoti_b)

        # Assign output
        for j in range(ydothist.shape[0]):
            ydothist[j, i] = ydoti[0, j]

    # Backwards difference method
    for i in range(N - n, N):

        # Delta t matrix
        for j in range(1, n + 1):
            d = 1.0
            hj = h * j
            for k in range(1, n + 1):
                d = d * k
                hmat[j - 1, k - 1] = (hj**k) / d

        # Backward finite difference
        k = 0
        ybvec = np.ndarray((M, n))
        for j in range(i - 1, i - n - 1, -1):
            ybvec[:, k] = yhist[:, i] - yhist[:, j]
            k += 1

        # Compute derivative
        if customge:
            ydoti = gauss_elimination(hmat, ybvec.transpose())
        else:
            ydoti = spla.solve(hmat, ybvec.transpose())

        # Assign output
        for j in range(ydothist.shape[0]):
            ydothist[j, i] = ydoti[0, j]

    return ydothist
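# A brief usage sketch (not part of the original module): differentiate a
# sampled sine wave and compare against the known derivative cos(t). The
# step size and difference order below are illustrative assumptions.
if __name__ == '__main__':
    h = 0.01
    t = np.arange(0.0, 2.0 * np.pi, h)
    yhist = np.sin(t).reshape(1, -1)             # 1 x N time history
    ydothist = finite_difference(yhist, h, n=2)  # second-order differences
    max_err = np.max(np.abs(ydothist[0, :] - np.cos(t)))
    print('max derivative error: %.3e' % max_err)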