def matrix_solve_steepest_descent(matrix,
                                  vector_b,
                                  tol,
                                  max_iter,
                                  getIterCount=False):
    """Iteratively solve matrix @ x = vector_b by steepest descent.

    Starts from x_0 = vector_b.  Each pass steps along the residual
    direction r = b - A x with the optimal step length
    alpha = (r . r) / (r . A r), stopping once the residual 2-norm
    falls to tol or max_iter passes have run.  Returns the solution
    vector; with getIterCount=True, returns (solution, iterations).
    """
    x_i = vector_b.copy()  # initial guess x_0
    error = tol * 10       # seeded above tol so the loop runs at least once
    count = 0
    while error > tol and count < max_iter:
        count += 1
        # residual r = b - A x_i
        neg_ax = vector_scal_mult(
            -1, convert_vec_mat(matrix_mult(matrix, convert_vec_mat(x_i))))
        residual = vector_add(vector_b, neg_ax)
        # optimal step length along the residual direction
        numer = vector_dot(residual, residual)
        denom = vector_dot(
            residual,
            convert_vec_mat(matrix_mult(matrix, convert_vec_mat(residual))))
        step = numer / denom
        x_i = vector_add(x_i, vector_scal_mult(step, residual))
        error = vector_2norm(residual)
    if getIterCount:
        return x_i, count
    return x_i
# Exemplo n.º 2
def matrix_rayleigh_quotient_iteration(matrix,
                                       tol,
                                       max_iter,
                                       getIterCount=False):
    """Estimate an eigenvalue of matrix via Rayleigh quotient iteration.

    Seeds with the normalized first row of the matrix, then repeatedly
    LU-solves the shifted system (A - lambda*I) w = v, renormalizes,
    and recomputes the Rayleigh quotient lambda = v^T A v.  Converges
    when successive eigenvalue estimates differ by at most tol.
    Returns the estimate; with getIterCount=True, (estimate, count).
    """

    def _rayleigh(vec):
        # v^T A v for a unit vector v, read out of the 1x1 product.
        col = convert_vec_mat(vec)
        return matrix_mult(matrix_transpose(col),
                           matrix_mult(matrix, col))[0][0]

    n = len(matrix)
    identity = [[int(row == col) for col in range(n)] for row in range(n)]
    v_i = vector_scal_mult(1 / vector_2norm(matrix[0]), matrix[0])
    eig_new = _rayleigh(v_i)
    error = tol * 10
    count = 0
    while error > tol and count < max_iter:
        eig_old = eig_new
        # shifted system (A - lambda I); may be near-singular close to
        # convergence, which is inherent to the method.
        shifted = matrix_add(matrix, matrix_scal_mult(-eig_new, identity))
        v_i = matrix_solve_LU(shifted, v_i)
        v_i = vector_scal_mult(1 / vector_2norm(v_i), v_i)
        eig_new = _rayleigh(v_i)
        error = abs(eig_new - eig_old)
        count += 1
    if getIterCount:
        return eig_new, count
    return eig_new
def matrix_solve_jacobian(matrix, vector_b, tol, max_iter, getIterCount=False):
    """Solve matrix @ x = vector_b with the Jacobi iteration.

    Each sweep rebuilds x from scratch: x_i = (b_i - sum of
    off-diagonal terms, all taken from the previous iterate) / a_ii.
    Convergence is tested on the true residual norm ||A x - b||.
    Returns the solution; with getIterCount=True, (solution, count).
    """
    n = len(matrix)
    xnew = [0] * n
    error = tol * 10  # force entry into the loop
    count = 0
    while error > tol and count < max_iter:
        count += 1
        xold = xnew.copy()
        xnew = vector_b.copy()
        for i in range(n):
            # subtract every off-diagonal contribution using the OLD iterate
            for j in range(n):
                if j != i:
                    xnew[i] -= matrix[i][j] * xold[j]
            xnew[i] /= matrix[i][i]
        # residual norm of the candidate solution: ||A x - b||
        error = vector_2norm(
            vector_add(
                convert_vec_mat(matrix_mult(matrix, convert_vec_mat(xnew))),
                vector_scal_mult(-1, vector_b)))
    if getIterCount:
        return xnew, count
    return xnew
# Exemplo n.º 4
def matrix_solve_conjugate_gradient(matrix,
                                    vector_b,
                                    tol,
                                    max_iter,
                                    getIterCount=False):
    """Solve matrix @ x = vector_b with the conjugate gradient method.

    Starts at x_0 = vector_b.  Maintains a search direction that is
    A-conjugate to the previous ones (Fletcher-Reeves update
    beta = r_new.r_new / r_old.r_old) and updates the residual
    incrementally instead of recomputing b - A x each pass.
    Returns the solution; with getIterCount=True, (solution, count).
    """
    x_i = vector_b.copy()  # initial guess x_0
    # r_0 = b - A x_0
    residual = vector_add(
        vector_b,
        vector_scal_mult(
            -1, convert_vec_mat(matrix_mult(matrix, convert_vec_mat(x_i)))))
    direction = residual
    rs_new = vector_dot(residual, residual)
    error = tol * 10
    count = 0
    while error > tol and count < max_iter:
        a_dir = convert_vec_mat(matrix_mult(matrix,
                                            convert_vec_mat(direction)))
        rs_old = rs_new
        step = rs_old / vector_dot(direction, a_dir)
        x_i = vector_add(x_i, vector_scal_mult(step, direction))
        # incremental residual update: r <- r - step * A d
        residual = vector_add(residual, vector_scal_mult(-step, a_dir))
        rs_new = vector_dot(residual, residual)
        direction = vector_add(residual,
                               vector_scal_mult(rs_new / rs_old, direction))
        count += 1
        error = vector_2norm(residual)
    if getIterCount:
        return x_i, count
    return x_i
def matrix_inverse_power_iteration(matrix,
                                   alpha,
                                   tol,
                                   max_iter,
                                   getIterCount=False):
    """Estimate the eigenvalue of matrix closest to the shift alpha.

    Runs power iteration on (A - alpha*I)^-1 by LU-solving
    (A - alpha*I) w = v every pass, renormalizing w, and reading off
    the Rayleigh quotient v^T A v.  Stops when successive estimates
    agree to within tol or after max_iter passes.  Returns the
    estimate; with getIterCount=True, (estimate, count).
    """
    n = len(matrix)
    # Precomputed -alpha * I so matrix_add yields the shifted matrix.
    neg_shift = [[-alpha * int(row == col) for col in range(n)]
                 for row in range(n)]
    v_i = matrix[0]  # seed vector: first row of the matrix
    eigenvaluenew = 0
    error = tol * 10
    count = 0
    while error > tol and count < max_iter:
        eigenvalueold = eigenvaluenew
        v_i = matrix_solve_LU(matrix_add(matrix, neg_shift), v_i)
        v_i = vector_scal_mult(1 / vector_2norm(v_i), v_i)
        col = convert_vec_mat(v_i)
        eigenvaluenew = matrix_mult(matrix_transpose(col),
                                    matrix_mult(matrix, col))[0][0]
        count += 1
        error = abs(eigenvaluenew - eigenvalueold)
    if getIterCount:
        return eigenvaluenew, count
    return eigenvaluenew
# Exemplo n.º 6
def matrix_solve_least_squares_QR(matrix, vector):
    """Least-squares solve of matrix @ x ~= vector via modified QR.

    Factors A = QR with the modified Gram-Schmidt routine, forms
    Q^T b, and back-substitutes through the upper-triangular R.
    Returns the solution produced by matrix_solve_upper_tri.

    Fix: removed leftover debug print() calls that dumped Q, R, Q^T b
    and the solution to stdout — the sibling QR solvers
    (matrix_solve_least_squares_QR_householder,
    matrix_solve_QR_factorization) are silent, and this one should be
    too.
    """
    (matrix_Q, matrix_R) = matrix_QR_factorization_mod(matrix)
    # Project b onto the orthonormal basis: Q^T b.
    matrix_Qtb = matrix_mult(matrix_transpose(matrix_Q),
                             convert_vec_mat(vector))
    # Solve R x = Q^T b by back substitution.
    solution = matrix_solve_upper_tri(matrix_R, convert_vec_mat(matrix_Qtb))
    return solution
def matrix_power_iteration(matrix, tol, max_iter):
    """Estimate the dominant eigenvalue of matrix by power iteration.

    The working vector is kept as a column matrix throughout: each
    pass multiplies it by A, renormalizes to unit 2-norm, and
    evaluates the Rayleigh quotient v^T A v.  Iterates until two
    successive estimates differ by at most tol or max_iter is hit.
    """
    v_col = convert_vec_mat(matrix[0])  # seed: first row, as a column
    eig_est = 1
    error = tol * 10
    count = 0
    while error > tol and count < max_iter:
        eig_prev = eig_est
        v_col = matrix_mult(matrix, v_col)
        v_col = matrix_scal_mult(1 / vector_2norm(convert_vec_mat(v_col)),
                                 v_col)
        eig_est = matrix_mult(matrix_transpose(v_col),
                              matrix_mult(matrix, v_col))[0][0]
        count += 1
        error = abs(eig_est - eig_prev)
    return eig_est
# Exemplo n.º 8
def matrix_solve_gauss_seidel_test():
    """Smoke-test Gauss-Seidel on four 100x100 matrix families.

    For each of random, symmetric, diagonally dominant, and
    diagonally dominant symmetric matrices, builds b = A * ones so the
    exact solution is known, then prints wall time, iteration count,
    and the computed solution.
    """
    size = 100
    test_matrices = [
        gen_rand_matrix(size),
        gen_sym_matrix(size),
        gen_diagdom_matrix(size),
        gen_diagdom_sym_matrix(size),
    ]
    ones = [1] * size
    for test_matrix in test_matrices:
        b = matrix_mult(test_matrix, convert_vec_mat(ones))
        started = time.time()
        solution = matrix_solve_gauss_seidel(test_matrix,
                                             convert_vec_mat(b),
                                             0.00001,
                                             1000,
                                             getIterCount=True)
        print("Time: " + str(time.time() - started))
        print("# of iterations: " + str(solution[1]))
        print(solution[0])
# Exemplo n.º 9
def steepest_descent_conjugate_gradient_compare():
    """Compare steepest descent against conjugate gradient on one
    random 100x100 diagonally dominant symmetric system whose exact
    solution is the all-ones vector; prints error and iteration count
    for each solver."""
    n = 100
    random_matrix = gen_diagdom_sym_matrix(n)
    # b = A * ones, so the exact solution is a vector of 1s.
    b = convert_vec_mat(matrix_mult(random_matrix, [[1]] * n))
    (solution1, iterCount1) = matrix_solve_steepest_descent(
        random_matrix, b, 0.0001, 10000, True)
    (solution2, iterCount2) = matrix_solve_conjugate_gradient(
        random_matrix, b, 0.0001, 10000, True)

    exact = [1] * n
    size_tag = str(n) + "x" + str(n)
    print("Steepest Descent " + size_tag + ": Absolute Error=" +
          str(abs_error_2norm(exact, solution1)) + "  Iteration Count=" +
          str(iterCount1))
    print("Conjugate Gradient " + size_tag + ": Absolute Error=" +
          str(abs_error_2norm(exact, solution2)) + "  Iteration Count=" +
          str(iterCount2))
# Exemplo n.º 10
def matrix_solve_jacobian_test():
    """Time the Jacobi solver on a random diagonally dominant system,
    then print A @ x so it can be eyeballed against b (= all ones)."""
    test_matrix = gen_sqr_diagdom_matrix()
    rhs = [1] * len(test_matrix)
    print("created matrix")
    started = time.time()

    result = matrix_solve_jacobian(test_matrix, rhs, 0.001, 10000)

    print("Time: " + str(time.time() - started))
    # A @ x should reproduce the all-ones right-hand side.
    print(matrix_mult(test_matrix, convert_vec_mat(result)))
# Exemplo n.º 11
def matrix_solve_jacobian_smart(matrix,
                                vector_b,
                                tol,
                                max_iter,
                                getIterCount=False):
    """Jacobi-type solver for matrix @ x = vector_b that also works on
    systems that are not diagonally dominant.

    The system is first symmetrized to the normal equations
    (A^T A) x = A^T b, then shifted: a scalar alpha is chosen so that
    A^T A + alpha*I is diagonally dominant, which guarantees the
    Jacobi sweep converges.  Each iteration solves the shifted system
    (A^T A + alpha*I) x_new = A^T b + alpha * x_old, whose fixed point
    solves the original normal equations.  Returns the solution; with
    getIterCount=True, (solution, count).
    """
    matrix_mod = matrix_mult(matrix_transpose(matrix), matrix)
    vector_mod = convert_vec_mat(
        matrix_mult(matrix_transpose(matrix), convert_vec_mat(vector_b)))
    # Smallest shift making matrix_mod + alpha*I diagonally dominant:
    # alpha >= sum_{j != i} |a_ij| - a_ii for every row i
    # (Levy-Desplanques condition).
    # BUG FIX: off-diagonal terms must enter in absolute value; the old
    # code summed the signed entries, which under-estimates alpha
    # whenever A^T A has negative off-diagonals and can break the
    # convergence guarantee.
    alpha = 0
    for i in range(len(matrix_mod)):
        row_bound = -matrix_mod[i][i]  # diagonal of A^T A is nonnegative
        for j in range(len(matrix_mod[0])):
            if j != i:
                row_bound += abs(matrix_mod[i][j])
        alpha = max(alpha, row_bound)
    xnew = [0 for _ in range(len(matrix_mod))]
    error = tol * 10
    count = 0
    while error > tol and count < max_iter:
        count += 1
        xold = xnew.copy()
        # Right-hand side of the shifted system: A^T b + alpha * x_old.
        xnew = [(vector_mod[i] + alpha * xold[i]) for i in range(len(xold))]
        for i in range(len(matrix_mod)):
            for j in range(i):
                xnew[i] = xnew[i] - matrix_mod[i][j] * xold[j]
            for j in range(i + 1, len(xnew)):
                xnew[i] = xnew[i] - matrix_mod[i][j] * xold[j]
        for j in range(len(xnew)):
            xnew[j] = xnew[j] / (matrix_mod[j][j] + alpha)

        # Convergence is measured on the ORIGINAL system: ||A x - b||.
        error = vector_2norm(
            vector_add(
                convert_vec_mat(matrix_mult(matrix, convert_vec_mat(xnew))),
                vector_scal_mult(-1, vector_b)))
    if getIterCount:
        return xnew, count
    return xnew
# Exemplo n.º 12
def hilbert_matrix_steepest_descent_test():
    """Report steepest-descent accuracy on Hilbert matrices of size
    4, 8, 16 and 32 with right-hand side b = [1]*n.

    Hilbert matrices are famously ill-conditioned, so this exercises
    the solver on nearly singular systems; prints the 2-norm error of
    A x against the all-ones target, then the solution itself.
    """
    sizes = [4, 8, 16, 32]
    hilberts = []
    solutions = []

    for n in sizes:
        # H[j][i] = 1 / (1 + i + j): the classic Hilbert matrix.
        hilberts.append([[1 / (1 + i + j) for i in range(n)]
                         for j in range(n)])
        solutions.append(
            matrix_solve_steepest_descent(hilberts[-1], [1] * n, 0.0001,
                                          10000))

    for idx, n in enumerate(sizes):
        # Compare A x against the target vector of ones.
        achieved = convert_vec_mat(
            matrix_mult(hilberts[idx], convert_vec_mat(solutions[idx])))
        print("Absolute error for " + str(n) + "x" + str(n) + ": " +
              str(abs_error_2norm([1] * n, achieved)))
        print(solutions[idx])
def steepest_descent_conjugate_gradient_gauss_seidel_compare():
    """Benchmark steepest descent, conjugate gradient, and
    Gauss-Seidel on one random 100x100 diagonally dominant symmetric
    system whose exact solution is all ones; prints error, iteration
    count, and wall time for each solver."""
    n = 100
    random_matrix = gen_diagdom_sym_matrix(n)
    # b = A * ones, so the exact solution is a vector of 1s.
    b = convert_vec_mat(matrix_mult(random_matrix, [[1]] * n))

    def timed(solver):
        # Run one solver with shared settings; return (x, iters, secs).
        started = time.time()
        solution, iterations = solver(random_matrix, b, 0.00000000001,
                                      10000, True)
        return solution, iterations, time.time() - started

    (solution1, iterCount1, time1) = timed(matrix_solve_steepest_descent)
    (solution2, iterCount2, time2) = timed(matrix_solve_conjugate_gradient)
    (solution3, iterCount3, time3) = timed(matrix_solve_gauss_seidel)

    exact = [1] * n
    size_tag = str(n) + "x" + str(n)
    print("Steepest Descent " + size_tag + ": Absolute Error=" +
          str(abs_error_2norm(exact, solution1)) + "  Iteration Count=" +
          str(iterCount1) + " time=" + str(time1))
    print("Conjugate Gradient " + size_tag + ": Absolute Error=" +
          str(abs_error_2norm(exact, solution2)) + "  Iteration Count=" +
          str(iterCount2) + " time=" + str(time2))
    print("Gauss Seidel " + size_tag + ": Absolute Error=" +
          str(abs_error_2norm(exact, solution3)) + "  Iteration Count=" +
          str(iterCount3) + " time=" + str(time3))
# Exemplo n.º 14
def matrix_solve_least_square(matrix, vector):
    """Solve the least-squares problem min ||A x - b|| through the
    normal equations (A^T A) x = A^T b, handed to the generic linear
    solver."""
    normal_matrix = matrix_mult(matrix_transpose(matrix), matrix)
    normal_rhs = matrix_mult(matrix_transpose(matrix),
                             convert_vec_mat(vector))
    return matrix_solve(normal_matrix, convert_vec_mat(normal_rhs))
# Exemplo n.º 15
def matrix_solve_least_squares_QR_householder(matrix, vector):
    """Least-squares solve via Householder QR: factor A = QR, form
    Q^T b, then back-substitute through the upper-triangular R."""
    (q_factor, r_factor) = matrix_QR_factorization_householder(matrix)
    qt_b = matrix_mult(matrix_transpose(q_factor),
                       convert_vec_mat(vector))
    return matrix_solve_upper_tri(r_factor, convert_vec_mat(qt_b))
# Exemplo n.º 16
def matrix_solve_QR_factorization(matrix, vector):
    """Solve matrix @ x = vector through a (classical) QR
    factorization: A = QR, then back-substitute R x = Q^T b."""
    (q_factor, r_factor) = matrix_QR_factorization(matrix)
    qt_b = matrix_mult(matrix_transpose(q_factor),
                       convert_vec_mat(vector))
    return matrix_solve_upper_tri(r_factor, convert_vec_mat(qt_b))