Example #1
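This excerpt omits the module-level imports of the surrounding file. A hedged reconstruction of what the function relies on (the import path for ``basic_logger`` is an assumption and may differ in ``dit``):

import logging

import numpy as np

from dit.utils import basic_logger  # assumed location of basic_logger; may differ in dit
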
def frank_wolfe(objective, gradient, A, b, initial_x,
                maxiters=2000, tol=1e-4, clean=True, verbose=None):
    """
    Uses the Frank--Wolfe algorithm to minimize the convex objective.

    Minimization is subject to the linear equality constraint: A x = b.

    Assumes x should be nonnegative.

    Parameters
    ----------
    objective : callable
        The objective function. It should receive a ``cvxopt`` matrix for the
        input `x` and return the value of the objective function.
    gradient : callable
        The gradient function. It should receive a ``cvxopt`` matrix for the
        input `x` and return the value of the gradient evaluated at `x`.
    A : matrix
        A ``cvxopt`` matrix specifying the LHS linear equality constraints.
    b : matrix
        A ``cvxopt`` matrix specifying the RHS linear equality constraints.
    initial_x : matrix
        A ``cvxopt`` matrix specifying the initial `x` to use.
    maxiters : int
        The maximum number of iterations to perform. If convergence was not
        reached after the last iteration, a warning is issued and the current
        value of `x` is returned.
    tol : float
        The tolerance used to determine when we have converged to the optimum.
    clean : bool
        Occasionally, the iteration will make values that should be nonnegative
        very slightly negative. If ``True``, such values are forcibly set to
        zero and the vector is renormalized. This is an application-specific
        decision and is probably not useful more generally.
    verbose : int
        An integer representing the logging level, as in the ``logging`` module.
        If `None`, the log level is effectively set to `logging.WARNING`. For
        a bit more information, set this to `logging.INFO`; for a bit less,
        set it to `logging.ERROR`, or perhaps 100.

    """
    # Function level import to avoid circular import.
    from dit.algorithms.optutil import op_runner

    # Function level import to keep cvxopt dependency optional.
    # All variables should be cvxopt variables, not NumPy arrays
    from cvxopt import matrix
    from cvxopt.modeling import variable

    # Set up a custom logger.
    logger = basic_logger('dit.frankwolfe', verbose)

    # Set cvx info level based on logging.DEBUG level.
    if logger.isEnabledFor(logging.DEBUG):
        show_progress = True
    else:
        show_progress = False

    assert (A.size[1] == initial_x.size[0])

    n = initial_x.size[0]
    x = initial_x
    xdiff = 0

    TOL = 1e-7
    verbosechunk = max(1, maxiters // 10)
    for i in range(maxiters):
        obj = objective(x)
        grad = gradient(x)

        xbar = variable(n)
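
        # Frank-Wolfe linear subproblem: minimize <grad, xbar> over the
        # (slightly relaxed) feasible set A xbar ~= b, xbar >= 0.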

        new_objective = grad.T * xbar
        constraints = []
        constraints.append((xbar >= 0))
        constraints.append((-TOL <= A * xbar - b))
        constraints.append((A * xbar - b <= TOL))

        logger.debug('FW Iteration: {}'.format(i))
        opt = op_runner(new_objective, constraints, show_progress=show_progress)
        if opt.status != 'optimal':
            msg = '\tFrank-Wolfe: Did not find optimal direction on '
            msg += 'iteration {}: {}'
            msg = msg.format(i, opt.status)
            logger.info(msg)

        # Calculate optimality gap
        xbar_opt = opt.variables()[0].value
        opt_bd = grad.T * (xbar_opt - x)

        msg = "i={:6}  obj={:10.7f}  opt_bd={:10.7f}  xdiff={:12.10f}"
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(msg.format(i, obj, opt_bd[0, 0], xdiff))
            logger.debug("")
        elif i % verbosechunk == 0:
            logger.info(msg.format(i, obj, opt_bd[0, 0], xdiff))
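
        # Standard Frank-Wolfe update with step size gamma = 2 / (i + 2):
        # x_{i+1} = (1 - gamma) * x_i + gamma * xbar_opt = (i*x + 2*xbar_opt) / (i + 2).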

        xnew = (i * x + 2 * xbar_opt) / (i + 2)
        xdiff = np.linalg.norm(xnew - x)
        x = xnew

        if xdiff < tol:
            obj = objective(x)
            break
    else:
        msg = "Only converged to xdiff={:12.10f} after {} iterations. "
        msg += "Desired: {}"
        logger.warning(msg.format(xdiff, maxiters, tol))

    xopt = np.array(x)

    if clean:
        xopt[np.abs(xopt) < tol] = 0
        xopt /= xopt.sum()

    return xopt, obj
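
A minimal usage sketch for ``frank_wolfe`` above (hypothetical: the target point ``c``, the quadratic objective, and the simplex constraint are illustrative and not part of ``dit``):

# Project the point c onto the probability simplex by minimizing ||x - c||^2
# subject to sum(x) == 1; nonnegativity is handled inside frank_wolfe.
from cvxopt import matrix

c = matrix([0.1, 0.2, 0.7])              # illustrative target point

def objective(x):
    # Squared distance to c, returned as a Python float.
    return ((x - c).T * (x - c))[0]

def gradient(x):
    # Gradient of the squared distance, as a cvxopt column matrix.
    return 2 * (x - c)

A = matrix(1.0, (1, 3))                  # single equality constraint: sum(x) == 1
b = matrix(1.0, (1, 1))
initial_x = matrix([1.0, 1.0, 1.0]) / 3  # uniform starting point

xopt, obj = frank_wolfe(objective, gradient, A, b, initial_x)
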
Example #2
File: pid_broja.py  Project: vreuter/dit
    def initial_dist(self):
        """
        Find an initial point in the interior of the feasible set.

        """
        from cvxopt import matrix
        from cvxopt.modeling import variable

        A = self.A
        b = self.b

        # Assume they are already CVXOPT matrices
        if self.vartypes and A.size[1] != len(self.vartypes.free): # pylint: disable=no-member
            msg = 'A must be the reduced equality constraint matrix.'
            raise Exception(msg)

        # Set cvx info level based on logging.INFO level.
        if self.logger.isEnabledFor(logging.INFO):
            show_progress = True
        else:
            show_progress = False

        n = len(self.vartypes.free) # pylint: disable=no-member
        x = variable(n)
        t = variable()
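
        # Maximize the smallest entry of x (via the slack t) subject to A x
        # lying within tol of b; a positive optimum gives a strictly interior point.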

        tols = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3]
        for tol in tols:
            constraints = []
            constraints.append((-tol <= A * x - b))
            constraints.append((A * x - b <= tol))
            constraints.append((x >= t))

            # Objective to minimize
            objective = -t

            opt = op_runner(objective, constraints, show_progress=show_progress)
            if opt.status == 'optimal':
                #print("Found initial point with tol={}".format(tol))
                break
        else:
            msg = 'Could not find valid initial point: {}'
            raise Exception(msg.format(opt.status))

        # Grab the optimized x. Perhaps there is a more reliable way to get
        # x rather than t. For now, we check the length.
        optvariables = opt.variables()
        if len(optvariables[0]) == n:
            xopt = optvariables[0].value
        else:
            xopt = optvariables[1].value

        # Turn values close to zero to be exactly equal to zero.
        xopt = np.array(xopt)[:, 0]
        xopt[np.abs(xopt) < tol] = 0
        # Normalize properly accounting for fixed nonzero values.
        xopt /= xopt.sum()
        xopt *= self.normalization

        # Do not build the full vector since this is input to the reduced
        # optimization problem.
        #xx = np.zeros(len(dist.pmf))
        #xx[variables.nonzero] = xopt

        return xopt, opt
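
The interior-point trick used above is a small LP: maximize a slack ``t`` with ``x >= t`` while keeping ``A x`` within ``tol`` of ``b``. A standalone sketch of that LP with plain ``cvxopt.modeling``, at a single tolerance rather than the escalating list above (the toy ``A`` and ``b`` are illustrative, independent of the class):

from cvxopt import matrix
from cvxopt.modeling import op, variable

A = matrix(1.0, (1, 4))          # toy constraint: the four entries sum to 1
b = matrix(1.0, (1, 1))
n = A.size[1]
tol = 1e-8

x = variable(n)
t = variable()
constraints = [-tol <= A * x - b, A * x - b <= tol, x >= t]

problem = op(-t, constraints)    # minimizing -t maximizes the smallest entry of x
problem.solve()

print(problem.status)            # 'optimal' if an interior point was found
print(x.value)                   # feasible point with all entries >= t.value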