Example #1
    def append(self, arc):
        """Insert an arc at the end of the path

        :param arc: arc to be inserted
        :type arc: Arc

        :raise optimizationError: if the arc is not connected to the path
        :raise optimizationError: if the arc is already in the path
        """
        if arc.up != self.list_of_arcs[-1].down:
            raise excep.optimizationError(
                f'Arc {arc} cannot be inserted in a path after '
                f'arc {self.list_of_arcs[-1]}'
            )
        if arc in self.list_of_arcs:
            raise excep.optimizationError(
                f'Arc {arc} is already in the path. '
                f'It cannot be inserted as the path would not be '
                f'simple anymore.'
            )
        self.list_of_arcs.append(arc)
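
To make the connectivity check concrete, here is a minimal sketch with a namedtuple stand-in for the library's Arc class; only the attribute names up and down are taken from the code above, everything else is hypothetical:

from collections import namedtuple

Arc = namedtuple('Arc', ['up', 'down'])

path_arcs = [Arc('a', 'b'), Arc('b', 'c')]   # a simple path a -> b -> c
new_arc = Arc('c', 'd')
# append is legal only if the new arc starts where the path ends:
print(new_arc.up == path_arcs[-1].down)      # True
print(Arc('a', 'b') in path_arcs)            # True: re-inserting it would
                                             # break simplicity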
Example #2
    def prepend(self, arc):
        """Insert an arc at the beginning of the path

        :param arc: arc to be inserted
        :type arc: Arc

        :raise optimizationError: if the arc is not connected to the path
        :raise optimizationError: if the arc is already in the path
        """
        if arc.down != self.list_of_arcs[0].up:
            raise excep.optimizationError(
                f'Arc {arc} cannot be inserted in a path before '
                f'arc {self.list_of_arcs[0]}'
            )
        if arc in self.list_of_arcs:
            raise excep.optimizationError(
                f'Arc {arc} is already in the path. '
                f'It cannot be inserted as the path would not be '
                f'simple anymore.'
            )
        self.list_of_arcs.insert(0, arc)
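
The mirrored check for prepend, with the same hypothetical Arc stand-in: the new arc must end at the node where the path begins.

from collections import namedtuple

Arc = namedtuple('Arc', ['up', 'down'])

path_arcs = [Arc('b', 'c'), Arc('c', 'd')]   # a simple path b -> c -> d
new_arc = Arc('a', 'b')
# prepend is legal only if the new arc ends where the path begins:
print(new_arc.down == path_arcs[0].up)       # True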
Example #3
    def shortest_path_tree(self):
        """Extract the shortest path tree
        :return: the shortest path tree
        :rtype: Network

        :raise optimizationError: if the algorithm has not been run yet.
        """
        if self.results is None:
            raise excep.optimizationError(
                'The algorithm has not been executed yet.'
            )
        return Network(self.pred.values())
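
A small illustration of what Network receives, using plain tuples as hypothetical stand-ins for the library's arc objects: pred maps each reached node to the last arc on its shortest path, so the values alone form the arcs of the tree.

pred = {
    'b': ('a', 'b'),   # last arc on the shortest path to b
    'c': ('a', 'c'),
    'd': ('b', 'd'),
}
tree_arcs = list(pred.values())   # what Network(self.pred.values()) consumes
print(tree_arcs)                  # [('a', 'b'), ('a', 'c'), ('b', 'd')]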
Example #4
def modifiedCholesky(H):
    """Algorithm 11.7: modified Cholesky factorization

    :param H: a square symmetric matrix
    :type H: np.array 2D

    :return: Cholesky factor of H + tau * I, and tau
    :rtype: tuple(np.array, float)

    :raise optimizationError: if the matrix is not square and symmetric
    """
    tau = 0.0
    m, n = H.shape
    if m != n:
        raise excep.optimizationError(
            f'The matrix must be square and not {m}x{n}.')
    if not (H.transpose() == H).all():
        raise excep.optimizationError('The matrix must be symmetric.')

    # Frobenius norm of H, bounded away from zero
    frobeniusNorm = max(np.linalg.norm(H), 1e-06)

    # Identify the smallest diagonal element
    mindiag = min(H.diagonal())
    if mindiag >= 0:
        # If nonnegative, we try tau = 0
        tau = 0
        R = H
    else:
        # If negative, we try tau = ||H||
        tau = frobeniusNorm
        R = H + tau * np.eye(n)
    # We check if the matrix is positive definite using its eigenvalues.
    mineig = min(np.linalg.eigvalsh(R))
    while mineig <= 0:
        # If it is not positive definite, we update tau
        tau = max(2 * tau, 0.5 * frobeniusNorm)
        R = H + tau * np.eye(n)
        mineig = min(np.linalg.eigvalsh(R))
    return np.linalg.cholesky(R), tau
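
A quick sanity check of the factorization, assuming modifiedCholesky and its imports are in scope; the matrix below is symmetric but indefinite, so a strictly positive shift tau is expected:

import numpy as np

H = np.array([[1.0, 2.0],
              [2.0, 1.0]])          # eigenvalues 3 and -1: indefinite
L, tau = modifiedCholesky(H)
print(tau > 0)                      # True: a shift was needed
print(np.allclose(L @ L.T, H + tau * np.eye(2)))   # True by construction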
Example #5
    def _node_from_name(self, node_name):
        """Retrieve a node from its name

        :param node_name: name of the node
        :type node_name: str

        :return: the node
        :rtype: Node

        :raise optimizationError: if the name is unknown
        :raise optimizationError: if several nodes share the same name
        """
        index = [
            idx
            for idx, node in enumerate(self.nodes)
            if node.name == node_name
        ]
        if len(index) < 1:
            raise excep.optimizationError(f'Node {node_name} is unknown')
        if len(index) > 1:
            raise excep.optimizationError(
                f'Several nodes have the same name: {node_name}'
            )
        return self.nodes[index[0]]
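
The lookup idiom in isolation, with a hypothetical minimal Node class; the real method additionally guards against missing and duplicated names:

from dataclasses import dataclass

@dataclass
class Node:
    name: str

nodes = [Node('a'), Node('b'), Node('c')]
index = [idx for idx, node in enumerate(nodes) if node.name == 'b']
print(len(index) == 1)     # exactly one match: the name is unambiguous
print(nodes[index[0]])     # Node(name='b')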
Example #6
def conjugateGradient(Q, b, x0):
    """Algorithm 9.2: conjugate gradient method to solve the problem

    .. math:: \\min_x \\frac{1}{2} x^T Q x + b^T x.

    :param Q: symmetric and positive definite matrix n x n
    :type Q: np.array 2D

    :param b: vector of size n
    :type b: np.array 1D

    :param x0: starting point for the iterations.
    :type x0: np.array 1D

    :return: minimum of the quadratic function, and details of the iterations:

           - the current iterate xk
           - the gradient gk
           - the direction dk
           - the step alphak
           - the step betak

    :rtype: np.array 1D,
            list([np.array 1D, np.array 1D, np.array 1D, float, float])

    :raise optimizationError: if the matrix is not positive definite.
    """
    n = len(x0)
    xk = x0
    gk = Q @ xk + b
    iters = []
    dk = -gk
    betak = 0
    for _ in range(n):
        denom = np.inner(dk, Q @ dk)
        if denom <= 0:
            raise excep.optimizationError(
                'The matrix must be positive definite')
        alphak = -np.inner(dk, gk) / denom
        iters.append([xk, gk, dk, alphak, betak])
        xk = xk + alphak * dk
        gkp1 = Q @ xk + b
        betak = np.inner(gkp1, gkp1) / np.inner(gk, gk)
        dk = -gkp1 + betak * dk
        gk = gkp1
    iters.append([xk, gk, dk, alphak, betak])
    return xk, iters
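
A usage sketch, assuming conjugateGradient and its imports are in scope; the minimizer of the quadratic satisfies Q x = -b, which is easy to verify:

import numpy as np

Q = np.array([[4.0, 1.0],
              [1.0, 3.0]])          # symmetric positive definite
b = np.array([-1.0, -2.0])
x0 = np.zeros(2)
xstar, iters = conjugateGradient(Q, b, x0)
print(np.allclose(Q @ xstar, -b))   # True: the gradient Q x + b vanishes
print(len(iters))                   # n + 1 = 3 recorded iterations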
Example #7
def quadraticDirect(Q, b):
    """Algorithm 9.1: quadratic problems: direct solution

    :param Q: symmetric and positive definite matrix n x n
    :type Q: np.array 2D

    :param b: vector of size n
    :type b: np.array 1D

    :return: minimum of the quadratic function
    :rtype: np.array 1D

    :raise optimizationError: if the matrix is not positive definite.
    """
    try:
        # scipy.linalg.cholesky returns an upper triangular factor by
        # default; transpose to obtain the lower triangular factor L.
        L = la.cholesky(Q).T
    except np.linalg.LinAlgError as e:
        raise excep.optimizationError(
            'The matrix must be positive definite') from e

    y = la.solve_triangular(L, -b, lower=True)
    solution = la.solve_triangular(L.T, y, lower=False)
    return solution
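
The same test problem solved directly, assuming quadraticDirect is in scope together with numpy and scipy.linalg imported as la:

import numpy as np

Q = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([-1.0, -2.0])
x = quadraticDirect(Q, b)
print(np.allclose(Q @ x, -b))       # True: x solves Q x = -b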
Example #8
def lineSearch(obj, x, d, alpha0, beta1, beta2, lbd=3):
    """Algorithm 11.5: line search

    :param obj: function returning the value of the objective function
        and its gradient.

    :type obj: f, g = fct(x)

    :param x: point where the line search starts.
    :type x: numpy array

    :param d: direction along which the line search is performed.
    :type d: numpy array (size dimension as x)

    :param alpha0: first trial for the step
    :type alpha0: float. Must be positive.

    :param beta1: parameter for the first Wolfe condition.
    :type beta1: float. Must be strictly between 0 and 1.

    :param beta2: parameter for the second Wolfe condition.
    :type beta2: float. Must be strictly between 0 and 1,
                 and beta2 > beta1.

    :param lbd: extension factor. Must be > 1. Default: 3.
    :type lbd: float.

    :return: step found, and, for each iteration:

        - alpha,
        - the lower bound,
        - the upper bound,
        - the reason why the step has been updated.

    :rtype: float, list(tuple(float, float, float, str))

    :raise optimizationError: if lbd <= 1
    :raise optimizationError: if alpha0 <= 0
    :raise optimizationError: if beta1 <= 0 or beta1 >= 1
    :raise optimizationError: if beta2 >= 1
    :raise optimizationError: if beta1 >= beta2
    :raise optimizationError: if d is not a descent direction

    """
    if lbd <= 1:
        raise excep.optimizationError(f'lambda is {lbd} and must be > 1')
    if alpha0 <= 0:
        raise excep.optimizationError(f'alpha0 is {alpha0} and must be > 0')
    if beta1 <= 0 or beta1 >= 1:
        raise excep.optimizationError(
            f'beta1 = {beta1} must be strictly between 0 and 1')
    if beta2 >= 1:
        raise excep.optimizationError(
            f'beta2 = {beta2} must be strictly less than 1')
    if beta1 >= beta2:
        raise excep.optimizationError(
            f'Incompatible Wolfe condition parameters: beta1={beta1} '
            f'is greater than or equal to beta2={beta2}')

    f, g = obj(x)
    deriv = np.inner(g, d)
    if deriv >= 0:
        raise excep.optimizationError(
            f'd is not a descent direction: {deriv} >= 0')
    alpha = alpha0
    # The lower bound alphal is initialized to 0.
    alphal = 0
    # The upper bound alphar is initialized to "infinity", that is,
    # the largest floating point number representable in the machine.
    alphar = np.finfo(np.float64).max
    finished = False
    iters = [(alpha, alphal, alphar, '')]
    while not finished:
        xnew = x + alpha * d
        fnew, gnew = obj(xnew)
        # First Wolfe condition
        if fnew > f + alpha * beta1 * deriv:
            reason = "too long"
            alphar = alpha
            alpha = (alphal + alphar) / 2.0
        # Second Wolfe condition
        elif np.inner(gnew, d) < beta2 * deriv:
            reason = "too short"
            alphal = alpha
            if alphar == np.finfo(np.float64).max:
                alpha = lbd * alpha
            else:
                alpha = (alphal + alphar) / 2.0
        else:
            reason = "ok"
            finished = True
        iters.append((alpha, alphal, alphar, reason))
    return alpha, iters
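
A usage sketch on a simple quadratic, assuming lineSearch and its imports are in scope; with a steepest descent direction on f(x) = ||x||^2 / 2, the unit step already satisfies both Wolfe conditions:

import numpy as np

def obj(x):
    # f(x) = 0.5 * ||x||^2, with gradient x
    return 0.5 * np.inner(x, x), x

x = np.array([2.0, 1.0])
d = -x                                # steepest descent direction
alpha, iters = lineSearch(obj, x, d, alpha0=1.0, beta1=1e-4, beta2=0.9)
print(alpha)                          # 1.0: accepted immediately
print(iters[-1][-1])                  # 'ok'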