def test_invariants():
    # Assumes the test file's module-level fixtures, e.g.
    # n, m, l = symbols('n m l', integer=True)
    A = MatrixSymbol('A', n, m)
    B = MatrixSymbol('B', m, l)
    X = MatrixSymbol('X', n, n)
    objs = [Identity(n), ZeroMatrix(m, n), A, MatMul(A, B), MatAdd(A, A),
            Transpose(A), Adjoint(A), Inverse(X), MatPow(X, 2), MatPow(X, -1),
            MatPow(X, 0)]
    for obj in objs:
        # Every matrix expression must rebuild unchanged from its own args
        assert obj == obj.__class__(*obj.args)
def test_inverse():
    # Assumes the test file's module-level fixtures, e.g.
    # A = MatrixSymbol('A', n, m); C, D = square n x n MatrixSymbols;
    # E = MatrixSymbol('E', m, n), so A*E is square but its factors are not
    raises(ShapeError, lambda: Inverse(A))
    assert Inverse(Inverse(C)) == C
    assert Inverse(C) * C == Identity(C.rows)
    assert Inverse(eye(3)) == eye(3)
    assert Inverse(S(3)) == S(1) / 3
    assert Inverse(Identity(n)) == Identity(n)

    # Simplifies Muls if possible (i.e. submatrices are square)
    assert Inverse(C * D) == D.I * C.I
    # But still works when not possible
    assert Inverse(A * E).is_Inverse

    # We play nice with traditional explicit matrices
    assert Inverse(Matrix([[1, 2], [3, 4]])) == Matrix([[1, 2], [3, 4]]).inv()
def test_Identity():
    n, m = symbols('n m', integer=True)
    A = MatrixSymbol('A', n, m)
    In = Identity(n)
    Im = Identity(m)

    assert A * Im == A
    assert In * A == A

    assert Transpose(In) == In
    assert Inverse(In) == In
    assert In.conjugate() == In
def pdf(self, x):
    n, scale_matrix = self.n, self.scale_matrix
    p = scale_matrix.shape[0]

    if isinstance(x, list):
        x = ImmutableMatrix(x)
    if not isinstance(x, (MatrixBase, MatrixSymbol)):
        raise ValueError("%s should be an isinstance of Matrix "
                         "or MatrixSymbol" % str(x))
    sigma_inv_x = - Inverse(scale_matrix)*x / S(2)
    term1 = exp(Trace(sigma_inv_x))/((2**(p*n/S(2))) * multigamma(n/S(2), p))
    term2 = (Determinant(scale_matrix))**(-n/S(2))
    term3 = (Determinant(x))**(S(n - p - 1)/2)
    return term1 * term2 * term3
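# Hedged usage sketch: the method above is the Wishart density as exposed
# through sympy.stats. The call below follows sympy's documented public API,
# but the 2x2 identity scale matrix and the names W, X are illustrative
# assumptions, not part of the original snippet.
from sympy import MatrixSymbol
from sympy.stats import Wishart, density

W = Wishart('W', 3, [[1, 0], [0, 1]])  # 3 degrees of freedom, identity scale
X = MatrixSymbol('X', 2, 2)
print(density(W)(X).doit())  # explicit density expression in terms of X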
def pdf(self, x):
    alpha, beta, scale_matrix = self.alpha, self.beta, self.scale_matrix
    p = scale_matrix.shape[0]

    if isinstance(x, list):
        x = ImmutableMatrix(x)
    if not isinstance(x, (MatrixBase, MatrixSymbol)):
        raise ValueError("%s should be an isinstance of Matrix "
                         "or MatrixSymbol" % str(x))
    sigma_inv_x = - Inverse(scale_matrix)*x / beta
    term1 = exp(Trace(sigma_inv_x))/((beta**(p*alpha)) * multigamma(alpha, p))
    term2 = (Determinant(scale_matrix))**(-alpha)
    term3 = (Determinant(x))**(alpha - S(p + 1)/2)
    return term1 * term2 * term3
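# Hedged usage sketch for the matrix gamma density above, again following the
# sympy.stats documentation; the parameter values (alpha=1, beta=2, identity
# scale) and the names M, X are illustrative assumptions.
from sympy import MatrixSymbol
from sympy.stats import MatrixGamma, density

M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
X = MatrixSymbol('X', 2, 2)
print(density(M)(X).doit())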
def test_MatPow():
    A = MatrixSymbol('A', n, n)
    AA = MatPow(A, 2)
    assert AA.exp == 2
    assert AA.base == A
    assert (A**n).exp == n

    assert A**0 == Identity(n)
    assert A**1 == A
    assert A**2 == AA
    assert A**-1 == Inverse(A)
    assert (A**-1)**-1 == A
    assert (A**2)**3 == A**6
    assert A**S.Half == sqrt(A)
    assert A**Rational(1, 3) == cbrt(A)
    raises(NonSquareMatrixError, lambda: MatrixSymbol('B', 3, 2)**2)
def test_combine_powers():
    assert combine_powers(MatMul(D, Inverse(D), D, evaluate=False)) == \
        MatMul(Identity(n), D, evaluate=False)
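# Hedged demo of the same cancellation through the public API: doit() on a
# matrix product canonicalizes it, so D * D**-1 * D collapses back to D.
# The square MatrixSymbol D below is an assumed stand-in for the test fixture.
from sympy import Inverse, MatrixSymbol, symbols

n = symbols('n', integer=True)
D = MatrixSymbol('D', n, n)
print((D * Inverse(D) * D).doit())  # -> D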
# importing sympy functions
import sympy
from sympy.matrices import Matrix, Inverse

x1, x2 = sympy.symbols('x1, x2')
XX1 = Matrix([0, 0])  # Starting point at the origin
FUNC = (x1 + 1)**2 + (x2 + 3)**2 + 4  # Function to minimize

# Differentiating FUNC with respect to x1 and then x2 gives the gradient
F_PRIM = Matrix([[sympy.diff(FUNC, x1)],
                 [sympy.diff(FUNC, x2)]])

# Differentiating F_PRIM with respect to x1 and then x2; because the function
# is quadratic, this yields the (constant) Hessian
H = Matrix([[sympy.diff(F_PRIM[0], x1), sympy.diff(F_PRIM[1], x1)],
            [sympy.diff(F_PRIM[0], x2), sympy.diff(F_PRIM[1], x2)]])

#################### NEWTON'S METHOD ####################
XX2 = XX1 - Inverse(H) * F_PRIM.subs({x1: XX1[0], x2: XX1[1]})  # XX2 = [-1, -3]

print(
    f"Function is quadratic therefore converges in one step. "
    f"Hessian = {H} is positive definite therefore X2 = {XX2} is the function's minimum"
)
'''
# Output:
Function is quadratic therefore converges in one step. Hessian = [2, 0, 0, 2]
is positive definite therefore X2 = [-1, -3] is the function's minimum.
'''
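# Hedged sanity check (a hypothetical addition, reusing the symbols above):
# the Newton step on a quadratic lands exactly on the stationary point, i.e.
# the solution of F_PRIM = 0.
stationary = sympy.solve([F_PRIM[0], F_PRIM[1]], [x1, x2])
assert stationary == {x1: -1, x2: -3}  # matches XX2 from the Newton step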
# (Assumed setup, reconstructed from the comments below; the original snippet
# relies on earlier module-level definitions along the lines of
#     X = Matrix(sympy.symbols('x y'))     # symbolic variables
#     X1 = Matrix([1, 1])                  # starting point
#     FUNC = 0.5*X[0]**2 + 500.0*X[1]**2   # so F_PRIM = [[1.0*x], [1000.0*y]]
# )

#################### NEWTON'S METHOD ####################
F_PRIM = Matrix([0, 0])  # Empty matrix to hold the gradient
F_PRIM[0] = FUNC.diff(X[0])  # differentiating FUNC with respect to x and y separately
F_PRIM[1] = FUNC.diff(X[1])
# F_PRIM = [[1.0*x], [1000.0*y]] = H * X

F_BIS = Matrix([[0, 0], [0, 0]])  # Empty matrix to hold the Hessian
F_BIS[0] = F_PRIM[0].diff(X[0])  # differentiating F_PRIM with respect to x and y separately
F_BIS[1] = F_PRIM[1].diff(X[0])
F_BIS[2] = F_PRIM[0].diff(X[1])
F_BIS[3] = F_PRIM[1].diff(X[1])
# F_BIS = [[1, 0], [0, 1000]] = H

X2 = X - Inverse(F_BIS) * F_PRIM  # X2 = [0, 0], convergence in one step
print("Newton's Method")
print(f"  Newton's Method converges in one step, X2 = {list(X2)}\n")

#################### GRADIENT DESCENT ####################
X2_gd = X1 - F_PRIM.subs({X[0]: X1[0], X[1]: X1[1]})
X3_gd = X2_gd - F_PRIM.subs({X[0]: X2_gd[0], X[1]: X2_gd[1]})
print("Gradient Descent Method")
print(f"  After first step of Gradient Descent Method X2 = {list(X2_gd)}")
print(f"  After second step of Gradient Descent Method X3 = {list(X3_gd)}")
print("  Gradient Descent Method diverges.\n")

#################### CONJUGATE DESCENT ####################