Example #1
def cholesky_solve(A, b, **kwargs):
    """
    Ax = b => x

    Solve a symmetric positive-definite linear equation system.
    This is twice as efficient as lu_solve.

    Typical use cases:
    * A.T*A
    * Hessian matrix
    * differential equations
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows != A.cols:
        raise ValueError('can only solve determined system')
    # Cholesky factorization
    L = cholesky(A)
    # solve
    n = L.rows
    assert len(b) == n
    for i in xrange(n):
        b[i] -= fsum(L[i,j] * b[j] for j in xrange(i))
        b[i] /= L[i,i]
    x = U_solve(L.T, b)
    return x
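A minimal usage sketch (assuming this cholesky_solve is the one shipped with mpmath, where it is importable from the top-level namespace):

# Hedged usage example for the function above; names follow mpmath's API.
from mpmath import matrix, cholesky_solve

A = matrix([[4, 2], [2, 3]])   # symmetric positive-definite
b = matrix([2, 1])
x = cholesky_solve(A, b)       # solves A*x = b via the Cholesky factor
print(x)                       # x = (0.5, 0.0)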
Example #2
def lu(A):
    """
    A -> P, L, U

    LU factorisation of a square matrix A. L is the lower, U the upper part.
    P is the permutation matrix indicating the row swaps.

    P*A = L*U

    If you need efficiency, use the low-level method LU_decomp instead; it's
    much more memory-efficient.
    """
    # get factorization
    A, p = LU_decomp(A.copy())
    n = A.rows
    L = matrix(n)
    U = matrix(n)
    for i in xrange(n):
        for j in xrange(n):
            if i > j:
                L[i, j] = A[i, j]
            elif i == j:
                L[i, j] = 1
                U[i, j] = A[i, j]
            else:
                U[i, j] = A[i, j]
    # calculate permutation matrix
    P = eye(n)
    for k in xrange(len(p)):
        swap_row(P, k, p[k])
    return P, L, U
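A small check of the factorisation (assuming the mpmath version of lu above):

# P*A should equal L*U up to rounding.
from mpmath import matrix, lu

A = matrix([[2, 1, 1], [4, 3, 3], [8, 7, 9]])
P, L, U = lu(A)
print(P * A - L * U)   # approximately the zero matrix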
Example #3
def inverse(A, **kwargs):
    """
    Calculate the inverse of a matrix.

    If you want to solve an equation system Ax = b, it's recommended to use
    solve(A, b) instead; it's about three times more efficient.
    """
    # do not overwrite A
    A = matrix(A, **kwargs).copy()
    n = A.rows
    # get LU factorisation
    A, p = LU_decomp(A)
    cols = []
    # calculate unit vectors and solve corresponding system to get columns
    for i in xrange(1, n + 1):
        e = unitvector(n, i)
        y = L_solve(A, e, p)
        cols.append(U_solve(A, y))
    # convert columns to matrix
    inv = []
    for i in xrange(n):
        row = []
        for j in xrange(n):
            row.append(cols[j][i])
        inv.append(row)
    return matrix(inv, **kwargs)
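A quick check (assuming mpmath's public inverse):

from mpmath import matrix, inverse

A = matrix([[1, 2], [3, 4]])
print(A * inverse(A))   # approximately the 2x2 identity matrix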
Example #4
def inverse(A, **kwargs):
    """
    Calculate the inverse of a matrix.

    If you want to solve an equation system Ax = b, it's recommended to use
    solve(A, b) instead; it's about three times more efficient.
    """
    # do not overwrite A
    A = matrix(A, **kwargs).copy()
    n = A.rows
    # get LU factorisation
    A, p = LU_decomp(A)
    cols = []
    # calculate unit vectors and solve corresponding system to get columns
    for i in xrange(1, n + 1):
        e = unitvector(n, i)
        y = L_solve(A, e, p)
        cols.append(U_solve(A, y))
    # convert columns to matrix
    inv = []
    for i in xrange(n):
        row = []
        for j in xrange(n):
            row.append(cols[j][i])
        inv.append(row)
    return matrix(inv, **kwargs)
Example #5
def lu_solve(A, b, **kwargs):
    """
    Ax = b => x

    Solve a determined or overdetermined linear equation system.
    Fast LU decomposition is used, which is less accurate than QR decomposition
    (especially for overdetermined systems), but it's twice as efficient.
    Use qr_solve if you want more precision or have to solve a very
    ill-conditioned system.
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows < A.cols:
        raise ValueError('cannot solve underdetermined system')
    if A.rows > A.cols:
        # use least-squares method if overdetermined
        # (this increases errors)
        AT = A.T
        A = AT * A
        b = AT * b
        return cholesky_solve(A, b)
    else:
        # LU factorization
        A, p = LU_decomp(A)
        b = L_solve(A, b, p)
        x = U_solve(A, b)
        return x
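A usage sketch covering both branches (assuming mpmath's public lu_solve):

from mpmath import matrix, lu_solve

# determined system: unique solution
A = matrix([[1, 2], [3, 4]])
b = matrix([5, 6])
print(lu_solve(A, b))   # x = (-4.0, 4.5)

# overdetermined system: least-squares solution via the normal equations
A = matrix([[1, 0], [1, 1], [1, 2]])
b = matrix([1, 2, 2])
print(lu_solve(A, b))   # approximately (1.1667, 0.5)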
Example #6
def lu(A):
    """
    A -> P, L, U

    LU factorisation of a square matrix A. L is the lower, U the upper part.
    P is the permutation matrix indicating the row swaps.

    P*A = L*U

    If you need efficiency, use the low-level method LU_decomp instead; it's
    much more memory-efficient.
    """
    # get factorization
    A, p = LU_decomp(A)
    n = A.rows
    L = matrix(n)
    U = matrix(n)
    for i in xrange(n):
        for j in xrange(n):
            if i > j:
                L[i,j] = A[i,j]
            elif i == j:
                L[i,j] = 1
                U[i,j] = A[i,j]
            else:
                U[i,j] = A[i,j]
    # calculate permutation matrix
    P = eye(n)
    for k in xrange(len(p)):
        swap_row(P, k, p[k])
    return P, L, U
Example #7
def lu_solve(A, b, **kwargs):
    """
    Ax = b => x

    Solve a determined or overdetermined linear equation system.
    Fast LU decomposition is used, which is less accurate than QR decomposition
    (especially for overdetermined systems), but it's twice as efficient.
    Use qr_solve if you want more precision or have to solve a very
    ill-conditioned system.
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows < A.cols:
        raise ValueError('cannot solve underdetermined system')
    if A.rows > A.cols:
        # use least-squares method if overdetermined
        # (this increases errors)
        AT = A.T
        A = AT * A
        b = AT * b
        return cholesky_solve(A, b)
    else:
        # LU factorization
        A, p = LU_decomp(A)
        b = L_solve(A, b, p)
        x = U_solve(A, b)
        return x
Example #8
def cholesky_solve(A, b, **kwargs):
    """
    Ax = b => x

    Solve a symmetric positive-definite linear equation system.
    This is twice as efficient as lu_solve.

    Typical use cases:
    * A.T*A
    * Hessian matrix
    * differential equations
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows != A.cols:
        raise ValueError('can only solve determined system')
    # Cholesky factorization
    L = cholesky(A)
    # solve
    n = L.rows
    assert len(b) == n
    for i in xrange(n):
        b[i] -= sum(L[i, j] * b[j] for j in xrange(i))
        b[i] /= L[i, i]
    x = U_solve(L.T, b)
    return x
Example #9
def residual(A, x, b, **kwargs):
    """
    Calculate the residual of a solution to a linear equation system.

    r = A*x - b for A*x = b
    """
    oldprec = mp.prec
    try:
        mp.prec *= 2
        A, x, b = matrix(A, **kwargs), matrix(x, **kwargs), matrix(b, **kwargs)
        return A * x - b
    finally:
        mp.prec = oldprec
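The doubled working precision makes the residual a cheap accuracy check (a sketch, assuming mpmath's public API):

from mpmath import matrix, lu_solve, residual, norm

A = matrix([[1, 2], [3, 4]])
b = matrix([5, 6])
x = lu_solve(A, b)
print(norm(residual(A, x, b)))   # tiny for a good solution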
Example #10
def residual(A, x, b, **kwargs):
    """
    Calculate the residual of a solution to a linear equation system.

    r = A*x - b for A*x = b
    """
    oldprec = mp.prec
    try:
        mp.prec *= 2
        A, x, b = matrix(A, **kwargs), matrix(x, **kwargs), matrix(b, **kwargs)
        return A*x - b
    finally:
        mp.prec = oldprec
Example #11
def z_rotation(rad):
    """
    returns a rotation matrix about the z axis
    """
    return matrix(4, 4,
                  ([[cos(rad), -sin(rad), 0, 0],
                    [sin(rad), cos(rad), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]))
Example #12
 def intersection(self, Lray):
     """
     Intersect the ray Lray with the patch spanned by vect1 and vect2.

     Solves point + a*vect1 + b*vect2 = origin + t*direction by row-reducing
     the augmented 3x4 system; returns (True, t) when the parameters satisfy
     |a| <= 1 and |b| <= 1, otherwise (False, 0).
     """
     system = matrix([[self.vect1.x, self.vect2.x, -Lray.direction.x, Lray.origin.x - self.point.x],
                      [self.vect1.y, self.vect2.y, -Lray.direction.y, Lray.origin.y - self.point.y],
                      [self.vect1.z, self.vect2.z, -Lray.direction.z, Lray.origin.z - self.point.z]])

     system.echelon_reduce()

     # locate the pivot row of the t column; no pivot means no intersection
     if 1 in system[:, 2].flat():
         index_value = system[:, 2].flat().index(1)
     else:
         return False, 0

     # read off the surface parameters a and b from their pivot rows
     if 1 in system[:, 0].flat():
         a_index = system[:, 0].flat().index(1)
         a = system[a_index, 3].flat()[0]
     else:
         a = 0

     if 1 in system[:, 1].flat():
         b_index = system[:, 1].flat().index(1)
         b = system[b_index, 3].flat()[0]
     else:
         b = 0

     # the hit counts only if both parameters lie within [-1, 1]
     if abs(a) <= 1 and abs(b) <= 1:
         t_value = system[index_value, 3].flat()[0]
         return True, t_value
     else:
         return False, 0
Example #13
def lu_solve_mat(a, b):
    """Solve a * x = b  where a and b are matrices."""
    r = matrix(a.rows, b.cols)
    for i in range(b.cols):
        c = lu_solve(a, b.column(i))
        for j in range(len(c)):
            r[j, i] = c[j]
    return r
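lu_solve_mat is an internal helper; the same column-by-column idea can be demonstrated with the public API (a sketch, assuming mpmath's matrix exposes the column() method used above):

from mpmath import matrix, lu_solve

a = matrix([[2, 0], [0, 4]])
b = matrix([[2, 4], [6, 8]])      # two right-hand sides, one per column
r = matrix(a.rows, b.cols)
for i in range(b.cols):
    c = lu_solve(a, b.column(i))  # solve for one column of x at a time
    for j in range(len(c)):
        r[j, i] = c[j]
print(r)                          # [[1.0, 2.0], [1.5, 2.0]]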
Example #14
def lu_solve_mat(a, b):
    """Solve a * x = b  where a and b are matrices."""
    r = matrix(a.rows, b.cols)
    for i in range(b.cols):
        c = lu_solve(a, b.column(i))
        for j in range(len(c)):
            r[j, i] = c[j]
    return r
Example #15
def qr_solve(A, b, norm=norm, **kwargs):
    """
    Ax = b => x, ||Ax - b||

    Solve a determined or overdetermined linear equation system and
    calculate the norm of the residual (error).
    QR decomposition using Householder factorization is applied, which gives
    very accurate results even for ill-conditioned matrices, though lu_solve
    is twice as efficient.
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows < A.cols:
        raise ValueError('cannot solve underdetermined system')
    H, p, x, r = householder(extend(A, b))
    res = norm(r)
    # calculate residual "manually" for determined systems
    if res == 0:
        res = norm(residual(A, x, b))
    return matrix(x, **kwargs), res
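A usage sketch for an overdetermined system (assuming mpmath's public qr_solve):

from mpmath import matrix, qr_solve

A = matrix([[1, 0], [1, 1], [1, 2]])   # 3 equations, 2 unknowns
b = matrix([1, 2, 2])
x, res = qr_solve(A, b)
print(x)     # least-squares solution, approximately (1.1667, 0.5)
print(res)   # norm of the residual ||A*x - b||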
Example #16
def qr_solve(A, b, norm=lambda x: norm_p(x, 2), **kwargs):
    """
    Ax = b => x, ||Ax - b||

    Solve a determined or overdetermined linear equation system and
    calculate the norm of the residual (error).
    QR decomposition using Householder factorization is applied, which gives
    very accurate results even for ill-conditioned matrices, though lu_solve
    is twice as efficient.
    """
    # do not overwrite A nor b
    A, b = matrix(A, **kwargs).copy(), matrix(b, **kwargs).copy()
    if A.rows < A.cols:
        raise ValueError('cannot solve underdetermined system')
    H, p, x, r = householder(extend(A, b))
    res = norm(r)
    # calculate residual "manually" for determined systems
    if res == 0:
        res = norm(residual(A, x, b))
    return matrix(x, **kwargs), res
Example #17
def solve():
    m1 = matrix(int(noVarScale.get()), int(noVarScale.get()))
    for i in range(int(noVarScale.get())):
        listOfRowNum = []
        for j in range(1, int(noVarScale.get()) + 1):
            #listOfRowNum.append("num"+str(i)+str(j))
            #print(i, j)
            num1 = numsTotal[i + 1]["num" + str(i + 1) + str(j)].get()
            listOfRowNum.append(num1)
        m1.insertRow(listOfRowNum)

    m2 = matrix(int(noVarScale.get()), 1)
    for i in range(int(noVarScale.get())):
        const1 = numsTotal[i + 1]["num" + str(i + 1) +
                                  str(int(noVarScale.get()) + 1)].get()
        m2.insertRow(const1)

    m1 = m1.inverse()
    answer1 = m1**m2

    refreshAnswerTable(answer1)
Example #18
def view_transform(FROM, TO, UP):
    forward = TO - FROM
    forward = forward.normalize()
    upn = UP.normalize()
    left = forward.cross(upn)
    true_up = left.cross(forward)
    orientation = matrix(
        4, 4, ([[left.val[0], left.val[1], left.val[2], 0],
                [true_up.val[0], true_up.val[1], true_up.val[2], 0],
                [-forward.val[0], -forward.val[1], -forward.val[2], 0],
                [0, 0, 0, 1]]))
    return orientation * translation(-FROM.val[0], -FROM.val[1], -FROM.val[2])
Example #19
def jacobian(f, x):
    """
    Calculate the Jacobian matrix of a function at the point x.

    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n
    """
    x = matrix(x)
    h = sqrt(eps)
    fx = matrix(f(*x))
    m = len(fx)
    n = len(x)
    J = matrix(m, n)
    for j in xrange(n):
        xj = x.copy()
        xj[j] += h
        Jj = (matrix(f(*xj)) - fx) / h
        for i in xrange(m):
            J[i, j] = Jj[i]
    return J
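The same forward-difference scheme in plain Python, for a concrete function whose exact Jacobian is known (a self-contained sketch; the function f below is made up for illustration):

# f(x1, x2) = (x1**2 + x2, x1*x2); its exact Jacobian at (3, 2) is
# [[6, 1], [2, 3]].
def f(x1, x2):
    return (x1**2 + x2, x1 * x2)

h = 1e-8                 # step size; the code above uses sqrt(eps)
x = [3.0, 2.0]
fx = f(*x)
J = [[0.0] * len(x) for _ in range(len(fx))]
for j in range(len(x)):
    xj = list(x)
    xj[j] += h           # perturb one coordinate at a time
    fxj = f(*xj)
    for i in range(len(fx)):
        J[i][j] = (fxj[i] - fx[i]) / h
print(J)                 # approximately [[6.0, 1.0], [2.0, 3.0]]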
Example #20
def jacobian(f, x):
    """
    Calculate the Jacobian matrix of a function at the point x.

    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n
    """
    x = matrix(x)
    h = sqrt(eps)
    fx = matrix(f(*x))
    m = len(fx)
    n = len(x)
    J = matrix(m, n)
    for j in xrange(n):
        xj = x.copy()
        xj[j] += h
        Jj = (matrix(f(*xj)) - fx) / h
        for i in xrange(m):
            J[i,j] = Jj[i]
    return J
Example #21
 def __init__(self, f, x0, **kwargs):
     self.f = f
     if isinstance(x0, (tuple, list)):
         x0 = matrix(x0)
     assert x0.cols == 1, 'need a vector'
     self.x0 = x0
     if 'J' in kwargs:
         self.J = kwargs['J']
     else:
         def J(*x):
             return jacobian(f, x)
         self.J = J
     self.norm = kwargs['norm']
     self.verbose = kwargs['verbose']
 def __iter__(self):
     f = self.f
     x0 = self.x0
     norm = self.norm
     J = self.J
     fx = matrix(f(*x0))
     fxnorm = norm(fx)
     cancel = False
     while not cancel:
         # get direction of descent
         fxn = -fx
         Jx = J(*x0)
         s = lu_solve(Jx, fxn)
         if self.verbose:
             print 'Jx:'
             print Jx
             print 's:', s
         # damping step size TODO: better strategy (hard task)
         l = one
         x1 = x0 + s
         while True:
             if x1 == x0:
                 if self.verbose:
                     print "canceled, won't get more excact"
                 cancel = True
                 break
             fx = matrix(f(*x1))
             newnorm = norm(fx)
             if newnorm < fxnorm:
                 # new x accepted
                 fxnorm = newnorm
                 x0 = x1
                 break
             l /= 2
             x1 = x0 + l * s
         yield (x0, fxnorm)
Example #23
 def __iter__(self):
     f = self.f
     x0 = self.x0
     norm = self.norm
     J = self.J
     fx = matrix(f(*x0))
     fxnorm = norm(fx)
     cancel = False
     while not cancel:
         # get direction of descent
         fxn = -fx
         Jx = J(*x0)
         s = lu_solve(Jx, fxn)
         if self.verbose:
             print 'Jx:'
             print Jx
             print 's:', s
         # damping step size TODO: better strategy (hard task)
         l = one
         x1 = x0 + s
         while True:
             if x1 == x0:
                 if self.verbose:
                     print "canceled, won't get more excact"
                 cancel = True
                 break
             fx = matrix(f(*x1))
             newnorm = norm(fx)
             if newnorm < fxnorm:
                 # new x accepted
                 fxnorm = newnorm
                 x0 = x1
                 break
             l /= 2
             x1 = x0 + l*s
         yield (x0, fxnorm)
Example #24
    def __init__(self, f, x0, **kwargs):
        self.f = f
        if isinstance(x0, (tuple, list)):
            x0 = matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:

            def J(*x):
                return jacobian(f, x)

            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']
Example #25
def det(A):
    """
    Calculate the determinant of a matrix.
    """
    # do not overwrite A
    A = matrix(A).copy()
    # use LU factorization to calculate determinant
    try:
        R, p = LU_decomp(A)
    except ZeroDivisionError:
        return 0
    z = 1
    for i, e in enumerate(p):
        if i != e:
            z *= -1
    for i in xrange(A.rows):
        z *= R[i,i]
    return z
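Two quick checks (assuming mpmath's public det):

from mpmath import matrix, det

print(det(matrix([[1, 2], [3, 4]])))   # 1*4 - 2*3 = -2
print(det(matrix([[1, 2], [2, 4]])))   # singular: LU fails, so det is 0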
Example #26
def det(A):
    """
    Calculate the determinant of a matrix.
    """
    # do not overwrite A
    A = matrix(A).copy()
    # use LU factorization to calculate determinant
    try:
        R, p = LU_decomp(A)
    except ZeroDivisionError:
        return 0
    z = 1
    for i, e in enumerate(p):
        if i != e:
            z *= -1
    for i in xrange(A.rows):
        z *= R[i, i]
    return z
Example #27
def cholesky(A):
    """
    Cholesky decomposition of a symmetric positive-definite matrix.

    Can be used to solve linear equation systems twice as efficiently as
    LU decomposition, or to test whether A is positive-definite.

    A = L * L.T
    Only L (the lower part) is returned.
    """
    assert isinstance(A, matrix)
    if not A.rows == A.cols:
        raise ValueError('need n*n matrix')
    n = A.rows
    L = matrix(n)
    for j in xrange(n):
        s = A[j,j] - fsum(L[j,k]**2 for k in xrange(j))
        if s < eps:
            raise ValueError('matrix not positive-definite')
        L[j,j] = sqrt(s)
        for i in xrange(j, n):
            L[i,j] = (A[i,j] - fsum(L[i,k] * L[j,k] for k in xrange(j))) \
                     / L[j,j]
    return L
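A small factorisation check (assuming mpmath's public cholesky):

from mpmath import matrix, cholesky

A = matrix([[4, 2], [2, 3]])   # symmetric positive-definite
L = cholesky(A)
print(L)               # the lower-triangular factor
print(L * L.T - A)     # approximately the zero matrix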
Example #28
def cholesky(A):
    """
    Cholesky decomposition of a symmetric positive-definite matrix.

    Can be used to solve linear equation systems twice as efficiently as
    LU decomposition, or to test whether A is positive-definite.

    A = L * L.T
    Only L (the lower part) is returned.
    """
    assert isinstance(A, matrix)
    if not A.rows == A.cols:
        raise ValueError('need n*n matrix')
    n = A.rows
    L = matrix(n)
    for j in xrange(n):
        s = A[j, j] - sum(L[j, k]**2 for k in xrange(j))
        if s < eps:
            raise ValueError('matrix not positive-definite')
        L[j, j] = sqrt(s)
        for i in xrange(j, n):
            L[i, j] = (A[i, j] - sum(L[i, k] * L[j, k] for k in xrange(j))) \
                      / L[j, j]
    return L
Example #29
def scaling(x, y, z):
    """
    returns a scaling matrix
    """
    return matrix(4, 4,
                  ([[x, 0, 0, 0], [0, y, 0, 0], [0, 0, z, 0], [0, 0, 0, 1]]))
Example #30
#!/usr/bin/python3
if __name__ == "__main__":
    from matrices import matrix
    from dir_control import dir
    from find_route import f

    start = (3, 5)
    fin = (5, 5)
    mtx = matrix(1)

    dir_mov = dir(start, fin)
    print(dir_mov)
Example #31
def findroot(f, x0, solver=Secant, tol=None, verbose=False, verify=True,
             force_type=mpmathify, **kwargs):
    r"""
    Find a solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    If the found root does not satisfy `|f(x)|^2 < \mathrm{tol}`,
    an exception is raised (this can be disabled with *verify=False*).

    **Arguments**

    *f*
        one dimensional function
    *x0*
        starting point, several starting points or interval (depends on solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *force_type*
        use specified type constructor on starting points
    *solver*
        a generator for *f* and *x0* returning approximate solution and error
    *maxsteps*
        after how many steps the solver will cancel
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    solver has to be callable with ``(f, x0, **kwargs)`` and return a generator
    yielding pairs of approximate solution and estimated error (which is
    expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.optimization for their documentation.

    **Examples**

    The function :func:`findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30
        >>> print findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> print findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> print findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> print findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> print lambert(1), lambertw(1)
        0.567143290409784 0.567143290409784
        >>> print lambert(1000), lambertw(1000)
        5.2496028524016 5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        matrix(
        [['-0.618033988749895'],
         ['-0.381966011250105']])
        >>> findroot(f, (10, 10))
        matrix(
        [['1.61803398874989'],
         ['-2.61803398874989']])

    You can verify this by solving the system manually.

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        matrix(
        [['-0.618033988749895'],
         ['-0.381966011250105']])


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        mpf('0.91807354244492868')

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        mpf('1.0')

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x: -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x: -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x: -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x: -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x: 0.99999999999999999214
        error: 10.562244329955107759
        x: 1.0
        error: 7.8598304758094664213e-18
        mpf('1.0')


    **Complex roots**

    For complex roots it's recommended to use Muller's method, as it converges
    very fast even for real starting points::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        mpc(real='0.72713608449119684', imag='0.93409928946052944')


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliably. However, they have problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        mpf('0.0')

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
        Try another starting point or tweak arguments.

    """
    # initialize arguments
    if not force_type:
        force_type = lambda x: x
    elif not tol and (force_type == float or force_type == complex):
        tol = 2**(-42)
    kwargs['verbose'] = verbose
    if 'd1f' in kwargs:
        kwargs['df'] = kwargs['d1f']
    if tol is None:
        tol = eps * 2**10
    kwargs['tol'] = tol
    if isinstance(x0, (list, tuple)):
        x0 = [force_type(x) for x in x0]
    else:
        x0 = [force_type(x0)]
    if isinstance(solver, str):
        try:
            solver = str2solver[solver]
        except KeyError:
            raise ValueError('could not recognize solver')
    # accept list of functions
    if isinstance(f, (list, tuple)):
        f2 = copy(f)
        def tmp(*args):
            return [fn(*args) for fn in f2]
        f = tmp
    # detect multidimensional functions
    try:
        fx = f(*x0)
        multidimensional = isinstance(fx, (list, tuple, matrix))
    except TypeError:
        fx = f(x0[0])
        multidimensional = False
    if 'multidimensional' in kwargs:
        multidimensional = kwargs['multidimensional']
    if multidimensional:
        # only one multidimensional solver available at the moment
        solver = MDNewton
        if not 'norm' in kwargs:
            norm = lambda x: norm_(x, mpf('inf'))
            kwargs['norm'] = norm
        else:
            norm = kwargs['norm']
    else:
        norm = abs
    # happily return starting point if it's a root
    if norm(fx) == 0:
        if multidimensional:
            return matrix(x0)
        else:
            return x0[0]
    # use solver
    iterations = solver(f, x0, **kwargs)
    if 'maxsteps' in kwargs:
        maxsteps = kwargs['maxsteps']
    else:
        maxsteps = iterations.maxsteps
    i = 0
    for x, error in iterations:
        if verbose:
            print 'x:    ', x
            print 'error:', error
        i += 1
        if error < tol * max(1, norm(x)) or i >= maxsteps:
            break
    if not isinstance(x, (list, tuple, matrix)):
        xl = [x]
    else:
        xl = x
    if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
        raise ValueError('Could not find root within given tolerance. '
                         '(%g > %g)\n'
                         'Try another starting point or tweak arguments.'
                         % (norm(f(*xl))**2, tol))
    return x
Example #32
import numpy as np
from matrices import matrix

# This code shows how to use the matrix class effectively

# Creating a 2 by 4 matrix with elements initialized to 0
m1 = matrix(np.zeros((2, 4)))
print(m1)

# Creating a 2 by 4 matrix with elements initialized to 1
m2 = matrix(np.ones((2, 4)))
print(m2)

# Creating a 4 by 4 matrix initialized to identity
m3 = matrix(np.identity(4))
print(m3)

# Multiplying two matrices of compatible size
m4 = m2 * m3
print(m4)

# Obtaining the number of rows and columns for a matrix
print(m3.getNumberOfRows())
print(m3.getNumberOfColumns())

# Testing for equality between matrices (these differ in size,
# so nothing is printed)
m1 = matrix(np.identity(3))
m2 = matrix(np.identity(4))

if m1 == m2:
    print("Matrices are equal")
Example #33
def y_rotation(rad):
    """
    returns a rotation matrix about the y axis
    """
    return matrix(4, 4, ([[cos(rad), 0, sin(rad), 0], [0, 1, 0, 0],
                          [-sin(rad), 0, cos(rad), 0], [0, 0, 0, 1]]))
Example #34
def shearing(xy, xz, yx, yz, zx, zy):
    """
    returns a shearing matrix
    """
    return matrix(
        4, 4, ([[1, xy, xz, 0], [yx, 1, yz, 0], [zx, zy, 1, 0], [0, 0, 0, 1]]))
Example #35
def translation(x, y, z):
    """
    returns a translation matrix
    """
    return matrix(4, 4,
                  ([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]))
Example #36
def to_matrix(Tuple):
    """
    converts a Tuple (point or vector) into a diagonal 4x4 matrix
    """
    return matrix(4, 4, ([[Tuple.val[0], 0, 0, 0], [0, Tuple.val[1], 0, 0],
                          [0, 0, Tuple.val[2], 0], [0, 0, 0, Tuple.val[3]]]))
Example #37
def findroot(f,
             x0,
             solver=Secant,
             tol=None,
             verbose=False,
             verify=True,
             force_type=mpmathify,
             **kwargs):
    r"""
    Find a solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    If the found root does not satisfy `|f(x)|^2 < \mathrm{tol}`,
    an exception is raised (this can be disabled with *verify=False*).

    **Arguments**

    *f*
        one dimensional function
    *x0*
        starting point, several starting points or interval (depends on solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *force_type*
        use specified type constructor on starting points
    *solver*
        a generator for *f* and *x0* returning approximate solution and error
    *maxsteps*
        after how many steps the solver will cancel
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    solver has to be callable with ``(f, x0, **kwargs)`` and return a generator
    yielding pairs of approximate solution and estimated error (which is
    expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.optimization for their documentation.

    **Examples**

    The function :func:`findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> lambert(1); lambertw(1)
        0.567143290409784
        0.567143290409784
        >>> lambert(1000); lambertw(1000)
        5.2496028524016
        5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        matrix(
        [['-0.618033988749895'],
         ['-0.381966011250105']])
        >>> findroot(f, (10, 10))
        matrix(
        [['1.61803398874989'],
         ['-2.61803398874989']])

    You can verify this by solving the system manually.

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        matrix(
        [['-0.618033988749895'],
         ['-0.381966011250105']])


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        0.918073542444929

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        1.0

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x: -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x: -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x: -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x: -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x: 0.99999999999999999214
        error: 10.562244329955107759
        x: 1.0
        error: 7.8598304758094664213e-18
        1.0


    **Complex roots**

    For complex roots it's recommended to use Muller's method, as it converges
    very fast even for real starting points::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        (0.727136084491197 + 0.934099289460529j)


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliably. However, they have problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        0.0

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
        Try another starting point or tweak arguments.

    """
    # initialize arguments
    if not force_type:
        force_type = lambda x: x
    elif not tol and (force_type == float or force_type == complex):
        tol = 2**(-42)
    kwargs['verbose'] = verbose
    if 'd1f' in kwargs:
        kwargs['df'] = kwargs['d1f']
    if tol is None:
        tol = eps * 2**10
    kwargs['tol'] = tol
    if isinstance(x0, (list, tuple)):
        x0 = [force_type(x) for x in x0]
    else:
        x0 = [force_type(x0)]
    if isinstance(solver, str):
        try:
            solver = str2solver[solver]
        except KeyError:
            raise ValueError('could not recognize solver')
    # accept list of functions
    if isinstance(f, (list, tuple)):
        f2 = copy(f)

        def tmp(*args):
            return [fn(*args) for fn in f2]

        f = tmp
    # detect multidimensional functions
    try:
        fx = f(*x0)
        multidimensional = isinstance(fx, (list, tuple, matrix))
    except TypeError:
        fx = f(x0[0])
        multidimensional = False
    if 'multidimensional' in kwargs:
        multidimensional = kwargs['multidimensional']
    if multidimensional:
        # only one multidimensional solver available at the moment
        solver = MDNewton
        if not 'norm' in kwargs:
            norm = lambda x: norm_(x, mpf('inf'))
            kwargs['norm'] = norm
        else:
            norm = kwargs['norm']
    else:
        norm = abs
    # happily return starting point if it's a root
    if norm(fx) == 0:
        if multidimensional:
            return matrix(x0)
        else:
            return x0[0]
    # use solver
    iterations = solver(f, x0, **kwargs)
    if 'maxsteps' in kwargs:
        maxsteps = kwargs['maxsteps']
    else:
        maxsteps = iterations.maxsteps
    i = 0
    for x, error in iterations:
        if verbose:
            print 'x:    ', x
            print 'error:', error
        i += 1
        if error < tol * max(1, norm(x)) or i >= maxsteps:
            break
    if not isinstance(x, (list, tuple, matrix)):
        xl = [x]
    else:
        xl = x
    if verify and norm(f(*xl))**2 > tol:  # TODO: better condition?
        raise ValueError('Could not find root within given tolerance. '
                         '(%g > %g)\n'
                         'Try another starting point or tweak arguments.' %
                         (norm(f(*xl))**2, tol))
    return x
Example #38
def x_rotation(rad):
    """
    returns a rotation matrix about the x axis
    """
    return matrix(4, 4, ([[1, 0, 0, 0], [0, cos(rad), -sin(rad), 0],
                          [0, sin(rad), cos(rad), 0], [0, 0, 0, 1]]))
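To illustrate what these 4x4 helpers are for, here is a self-contained sketch of composing a rotation and a translation and applying the result to a homogeneous point (plain nested lists are used, since the matrix class above belongs to the surrounding ray-tracer code):

from math import cos, sin, pi

def matmul(A, B):
    # 4x4 matrix product
    return [[sum(A[i][k] * B[k][j] for k in range(4)) for j in range(4)]
            for i in range(4)]

def matvec(A, v):
    # apply a 4x4 matrix to a homogeneous 4-vector
    return [sum(A[i][k] * v[k] for k in range(4)) for i in range(4)]

rot_z = [[cos(pi / 2), -sin(pi / 2), 0, 0],
         [sin(pi / 2), cos(pi / 2), 0, 0],
         [0, 0, 1, 0],
         [0, 0, 0, 1]]
move = [[1, 0, 0, 5], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]

p = [1, 0, 0, 1]   # the point (1, 0, 0) in homogeneous coordinates
# rotate about z by 90 degrees, then translate by (5, 0, 0): ~(5, 1, 0)
print(matvec(matmul(move, rot_z), p))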