Example #1
import numpy as np
import catmap  # catmap.copy is used below to copy the initial guess

def constrained_relaxation(A,
                           b,
                           x0,
                           x_min,
                           x_max,
                           max_iter=100000,
                           tolerance=1e-10):
    """Solve Ax=b subject to the constraints that 
    x_i > x_min_i and x_i < x_max_i. Algorithm is from Axelson 1996.
    
    Note that x_min/Max are both lists/arrays of length equal to x"""

    # define the functional corresponding to Ax = b
    def J(x, A, b):
        """Functional of x which corresponds to Ax=b for the relaxation
        method used: J(x) = x^T A^T A x - 2 b^T A x = ||Ax - b||^2 - b^T b."""
        answer = np.dot(np.dot(np.dot(x.T, A.T), A),
                        x) - 2 * np.dot(np.dot(b.T, A), x)
        return answer

    ai = x_min
    bi = x_max
    N = len(x0)

    def find_min(q):
        # minimize J along coordinate q of u (holding the other coordinates
        # fixed), then clip the minimizer to the interval [ai[q], bi[q]]
        u[q] = 0  # zero coordinate q so v = A.u excludes its contribution
        v = np.dot(A, u)
        Aq = A[:, q]
        num1 = np.dot(b, Aq)      # Aq . b
        num2 = np.dot(v, Aq)      # Aq . (A u), with u_q = 0
        denom = np.dot(Aq, Aq)    # Aq . Aq
        zeta = (num1 - num2) / denom  # unconstrained minimizer along q
        # project onto the box constraints
        if zeta > bi[q]:
            zeta = bi[q]
        if zeta < ai[q]:
            zeta = ai[q]
        return zeta

    u = catmap.copy(x0)
    nIter = 0
    converged = False
    while nIter < max_iter and not converged:
        nIter += 1
        fOld = J(u, A, b)
        # one full sweep of coordinate-wise minimizations
        for j in range(N):
            u[j] = find_min(j)
        fNew = J(u, A, b)
        fDiff = fOld - fNew
        # converged once a full sweep no longer changes the functional
        if np.linalg.norm(fDiff) < tolerance:
            converged = True

    if converged:
        return u
    else:
        raise ValueError('Constrained relaxation did not converge. '
                         'Residual was ' + str(np.linalg.norm(fDiff)))
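
A minimal usage sketch (not part of the original source): assuming numpy is available as np, the catmap package is installed (the function copies x0 via catmap.copy), and constrained_relaxation is defined as above, the call below solves a small 2x2 system whose unconstrained solution already lies inside the box, so the constrained and unconstrained answers coincide. The matrix, right-hand side, and bounds are illustrative values only.

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x0 = np.zeros(2)          # initial guess
x_min = [0.0, 0.0]        # lower bound on each x_i
x_max = [1.0, 1.0]        # upper bound on each x_i

x = constrained_relaxation(A, b, x0, x_min, x_max)
print(x)  # approximately [0.2, 0.6], within the bounds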
Example #2
import numpy as np
import catmap  # catmap.copy is used below to copy the initial guess

def constrained_relaxation(A,
                           b,
                           x0,
                           x_min,
                           x_max,
                           max_iter=100000,
                           tolerance=1e-10):
    """
    Solve Ax=b subject to the constraints
    x_min_i <= x_i <= x_max_i. Algorithm is from Axelson 1996.

    Note that x_min and x_max are both lists/arrays with the same length as x.

    :param A: A matrix.

    :type A: numpy.array

    :param b: b vector.

    :type b: numpy.array

    :param x0: Initial guess for the x vector.

    :type x0: numpy.array

    :param x_min: Minimum constraints.

    :type x_min: array_like

    :param x_max: Maximum constraints.

    :type x_max: array_like

    :param max_iter: Maximum number of iterations.

    :type max_iter: int, optional

    :param tolerance: Tolerance.

    :type tolerance: float, optional

    .. todo:: Check to make sure docstring is correct.
    """

    # define the functional corresponding to Ax = b
    def J(x, A, b):
        """
        Functional of x which corresponds to Ax=b for the relaxation
        method used: J(x) = x^T A^T A x - 2 b^T A x = ||Ax - b||^2 - b^T b.
        
        :param x: x vector.

        :type x: array_like

        :param A: A matrix.

        :type A: numpy.array

        :param b: b vector.

        :type b: array_like

        .. todo:: Check that docstring is correct
        """
        answer = np.dot(np.dot(np.dot(x.T, A.T), A),
                        x) - 2 * np.dot(np.dot(b.T, A), x)
        return answer

    ai = x_min
    bi = x_max
    N = len(x0)

    def find_min(q):
        """
        Minimize J along coordinate q of u (holding the other coordinates
        fixed), then clip the minimizer to the interval [ai[q], bi[q]].
        """
        u[q] = 0  # zero coordinate q so v = A.u excludes its contribution
        v = np.dot(A, u)
        Aq = A[:, q]
        num1 = np.dot(b, Aq)      # Aq . b
        num2 = np.dot(v, Aq)      # Aq . (A u), with u_q = 0
        denom = np.dot(Aq, Aq)    # Aq . Aq
        zeta = (num1 - num2) / denom  # unconstrained minimizer along q
        # project onto the box constraints
        if zeta > bi[q]:
            zeta = bi[q]
        if zeta < ai[q]:
            zeta = ai[q]
        return zeta

    u = catmap.copy(x0)
    nIter = 0
    converged = False
    while nIter < max_iter and not converged:
        nIter += 1
        fOld = J(u, A, b)
        # one full sweep of coordinate-wise minimizations
        for j in range(N):
            u[j] = find_min(j)
        fNew = J(u, A, b)
        fDiff = fOld - fNew
        # converged once a full sweep no longer changes the functional
        if np.linalg.norm(fDiff) < tolerance:
            converged = True

    if converged:
        return u
    else:
        raise ValueError('Constrained relaxation did not converge. '
                         'Residual was ' + str(np.linalg.norm(fDiff)))
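
As a sanity check (again an illustrative sketch, not part of the original source), the result can be compared against SciPy's bounded least-squares solver, which minimizes the same objective ||Ax - b||^2 subject to box constraints. This assumes SciPy is installed and reuses the small system from the usage sketch above.

import numpy as np
from scipy.optimize import lsq_linear

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])

# solution from the relaxation routine defined above
x_relax = constrained_relaxation(A, b, np.zeros(2), [0.0, 0.0], [1.0, 1.0])

# reference solution from SciPy's bounded least-squares solver
x_ref = lsq_linear(A, b, bounds=([0.0, 0.0], [1.0, 1.0])).x

print(x_relax)  # from the relaxation routine above
print(x_ref)    # from SciPy; the two should agree to several decimals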