def MarkowitzOpt(mean, variance, covariance, interest_rate, min_return):
    n = mean.size + 1                   # Number of assets (stocks plus the risk-free asset)
    
    mu = mean.values                    # Mean returns of n assets
    temp = np.full(n, interest_rate)
    temp[:-1] = mu
    mu = temp
        
    counter = 0
    Sigma = np.zeros((n,n))                 # Covariance of n assets
    for i in np.arange(n-1):
        for j in np.arange(i, n-1):
            if i==j:
                Sigma[i,j] = variance[i]
            else:
                Sigma[i,j] = covariance[counter]
                Sigma[j,i] = Sigma[i,j]
                counter+=1
    Sigma = nearestPD(Sigma)                # Converting covariance to the nearest positive-definite matrix
    
    # Ensuring feasibility of the inequality constraint
    if mu.max() < min_return:
        min_return = interest_rate
    
    w = Variable(n)                         # Portfolio allocation vector
    ret = mu.T * w
    risk = quad_form(w, Sigma)
    min_ret = Parameter(nonneg=True)
    min_ret.value = min_return
    prob = Problem(Minimize(risk),
                   [ret >= min_ret,
                    sum(w) == 1,
                    w >= 0])                 # w >= 0 restricts to a long-only portfolio
    prob.solve()
    return w.value
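
# Hedged usage sketch (not part of the original snippet): call MarkowitzOpt with
# hypothetical data for three stocks. `covariance` holds the off-diagonal pairs
# in the order the loop above reads them: (0,1), (0,2), (1,2). Assumes numpy,
# pandas, the cvxpy names used above and the nearestPD helper are all available.
import numpy as np
import pandas as pd

mean = pd.Series([0.012, 0.009, 0.015], index=['AAA', 'BBB', 'CCC'])
variance = np.array([0.040, 0.030, 0.050])
covariance = np.array([0.010, 0.008, 0.006])

weights = MarkowitzOpt(mean, variance, covariance,
                       interest_rate=0.001, min_return=0.010)
print(weights)  # allocation over the three stocks plus the risk-free asset
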
def mpt_opt(data, gamma_vec):
    NUM_SAMPLES = len(gamma_vec)
    w_vec_results = [None] * NUM_SAMPLES
    ret_results = np.zeros(NUM_SAMPLES)
    risk_results = np.zeros(NUM_SAMPLES)

    N = len(data)
    w_vec = Variable(N)
    mu_vec = np.array([np.mean(data[i]) for i in range(N)])
    sigma_mat = np.cov(data)

    gamma = Parameter(nonneg=True)

    ret_val = mu_vec.T * w_vec
    risk_val = quad_form(w_vec, sigma_mat)  # w^T Sigma w
    problem = Problem(Maximize(ret_val - gamma * risk_val),
                      [sum(w_vec) == 1, w_vec >= 0])

    for i, new_gamma in enumerate(gamma_vec):
        gamma.value = new_gamma
        problem.solve()
        w_vec_results[i] = w_vec.value
        ret_results[i] = ret_val.value
        risk_results[i] = sqrt(risk_val).value

    return (w_vec_results, ret_results, risk_results)
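
# Hedged usage sketch (not part of the original snippet): trace a risk/return
# frontier for four hypothetical assets. `data` has one row per asset, matching
# np.cov(data) above, and gamma_vec sweeps the risk-aversion parameter.
import numpy as np

np.random.seed(0)
data = 0.001 + 0.02 * np.random.randn(4, 250)
gamma_vec = np.logspace(-2, 3, num=20)
w_list, ret_curve, risk_curve = mpt_opt(data, gamma_vec)
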
    def __init__(self, name, c, A, b, dims):
        logging.info(name + " started")
        logging.info(name + "'s dims = " + str(dims))
        self.name = name
        _, n = np.shape(A)
        self.n = n
        self.dims = dims
        self.xbar = Parameter(n, value=np.zeros(n))
        self.xbar_old = Parameter(n, value=np.zeros(n))
        self.u = Parameter(n, value=np.zeros(n))
        self.x = Variable(n)
        self.c = c
        self.A = A
        self.b = b
        self.f = self.c.T @ self.x
        self.rho = constants.RHO
        self.f += (self.rho / 2) * sum_squares(
            self.x[self.dims] - self.xbar[self.dims] + self.u[self.dims])
        logging.info(name + "'s f = " + str(self.f))
        self.prox = Problem(Minimize(self.f),
                            [self.A @ self.x == self.b, self.x >= 0])
        self.history = {
            'objval': [],
            'r_norm': [],
            'eps_pri': [],
            's_norm': [],
            'eps_dual': [],
            'iter': []
        }
        self.has_converged = False
        self.k = 0
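
    # Hedged sketch (not part of the original class): the per-iteration update
    # such an ADMM worker typically performs; remember the previous consensus
    # value, solve the proximal step, then update the scaled dual variable on
    # this worker's coordinates. The method name `step` is hypothetical.
    def step(self, xbar_new):
        self.xbar_old.value = self.xbar.value
        self.xbar.value = xbar_new
        self.prox.solve()
        u_new = self.u.value.copy()
        u_new[self.dims] += self.x.value[self.dims] - self.xbar.value[self.dims]
        self.u.value = u_new
        self.k += 1
        return self.x.value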
Example #4
def run_process(f, pipe):
    xbar = Parameter(n, value=np.zeros(n))
    u = Parameter(n, value=np.zeros(n))
    f += (rho/2)*sum_squares(x - xbar + u)
    prox = Problem(Minimize(f))
    # ADMM loop.
    while True:
        prox.solve()
        pipe.send(x.value)
        xbar.value = pipe.recv()
        u.value += x.value - xbar.value
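
# Hedged driver sketch (not part of the original snippet), mirroring the CVXPY
# consensus-ADMM pattern run_process implements: spawn one worker per objective
# term in a hypothetical list f_list, then alternate between collecting the
# local solutions and broadcasting their average back over the pipes.
from multiprocessing import Pipe, Process

pipes = []
procs = []
for f_i in f_list:
    local, remote = Pipe()
    pipes.append(local)
    procs.append(Process(target=run_process, args=(f_i, remote)))
    procs[-1].start()

MAX_ITER = 50
for _ in range(MAX_ITER):
    # Each worker sends x after solving; send the averaged consensus value back.
    xbar_val = sum(pipe.recv() for pipe in pipes) / len(pipes)
    for pipe in pipes:
        pipe.send(xbar_val)

for proc in procs:
    proc.terminate()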
Example #5
    def __init__(self,
                 circle_list,
                 cij=None,
                 obj='ar0',
                 overlap=False,
                 min_edge=2,
                 verbose=False,
                 **kwargs):
        """
        Weighted Circle Packing Problem
            k -
            eps -
            min_edge -
        """
        FormulationR2.__init__(self, circle_list, **kwargs)

        # solver and record args
        self._solve_args = {'method': 'dccp'}
        self._in_dict = {'k': 1, 'min_edge': min_edge, 'eps': 1e-2}

        # gather inputs
        X = circle_list.point_vars
        n = len(circle_list)
        cij = cij if cij is not None else np.ones((n, n))

        # compute radii
        areas = np.asarray([x.area for x in circle_list.inputs])
        r = np.sqrt(areas /
                    np.pi)  # * np.log(1 + areas / (min_area - min_edge ** 2))
        self.r = r

        # indices of upper tris
        inds = np.triu_indices(n, 1)
        xi, xj = [x.tolist() for x in inds]

        # gather inputs
        weights = Parameter(shape=len(xi),
                            value=cij[inds],
                            name='cij',
                            nonneg=True)
        radii = Parameter(shape=len(xi),
                          value=r[xi] + r[xj],
                          name='radii',
                          nonneg=True)
        dists = cvx.norm(X[xi, :] - X[xj, :], 2, axis=1)

        # constraints
        self._constr.append(dists >= radii)

        # objective
        self._obj = Minimize(cvx.sum(cvx.multiply(weights, dists)))
Example #6
    def as_constraint(self, *args):
        """
        Imagine there are bubbles of a fixed size floating about constraining
        the discrete space

        each face is given a coordinate

         X = 1, Xg is coordinate      dist_real <= r
         X = 0, Xg is (0, 0)          dist_fake <= r + 1000

        note -
        the 2-norm is SIGNIFICANTLY faster than the 1-norm.
        """
        N = len(self.space.faces)
        # centroids.shape[N, 2]
        centroids = np.asarray(self.space.faces.centroids)
        M = 2 * centroids.max()
        centroids = Parameter(shape=centroids.shape, value=centroids)
        px, py = self._p.X, self._p.Y
        C = []
        for i, face_set in enumerate(self._actions):
            X = face_set.vars  # selected faces
            cx = cvx.multiply(centroids[:, 0], X)
            cy = cvx.multiply(centroids[:, 1], X)
            Xg = cvx.vstack([cx, cy]).T
            v = cvx.vstack(
                [cvx.promote(px[i], (N, )),
                 cvx.promote(py[i], (N, ))]).T
            C += [cvx.norm(v - Xg, 2, axis=1) <= self._r[i] + M * (1 - X)]  # accumulate one constraint per face set
        return C
Example #7
    def as_constraint(self, *args):
        """
        Imagine there are bubbles of a fixed size floating about constraining
        the discrete space

        each face is given a coordinate

         X = 1, Xg is coordinate      dist_real <= r
         X = 0, Xg is (0, 0)          dist_fake <= r + 1000

        note -
        the 2-norm is SIGNIFICANTLY faster than the 1-norm.
        """
        N = len(self.space.faces)
        X = self.stacked  # selected faces
        M = 100  # upper bound

        # centroids.shape[N, 2]
        centroids = np.asarray(self.space.faces.centroids)
        centroids = Parameter(shape=centroids.shape, value=centroids)
        cx = cvx.multiply(centroids[:, 0], X)
        cy = cvx.multiply(centroids[:, 1], X)
        Xg = cvx.vstack([cx, cy]).T
        v = cvx.vstack(
            [cvx.promote(self._bx, (N, )),
             cvx.promote(self._by, (N, ))]).T
        C = [cvx.norm(v - Xg, 2, axis=1) <= self._r + M * (1 - X)]
        return C
Example #8
    def test_square_param(self):
        """Test issue arising with square plus parameter.
        """
        a = Parameter(value=1)
        b = Variable()

        obj = Minimize(b**2 + abs(a))
        prob = Problem(obj)
        prob.solve()
        self.assertAlmostEqual(obj.value, 1.0)
Example #9
    def setup_class(self):
        self.cvx = Variable()**2
        self.ccv = Variable()**0.5
        self.aff = Variable()
        self.const = Constant(5)
        self.unknown_curv = log(Variable()**3)

        self.pos = Constant(1)
        self.neg = Constant(-1)
        self.zero = Constant(0)
        self.unknown_sign = Parameter()
Example #10
    def __init__(self, inputs, tgt, src=None, **kwargs):
        """
        Convert a PointList to a segment list
        """
        from src.cvopt.formulate.input_structs import PointList
        if isinstance(inputs, PointList):
            pass
        elif isinstance(inputs, int):
            inputs = PointList(inputs)
        FormulationR2.__init__(self, inputs, **kwargs)

        self._tgt_x = Parameter(value=tgt[0])
        self._tgt_y = Parameter(value=tgt[1])
        # source may not be an expression
        if src is None:
            self._src_x = Variable(pos=True, name='path_start_X')
            self._src_y = Variable(pos=True, name='path_start_Y')
        else:
            self._src_x = Parameter(value=src[0])
            self._src_y = Parameter(value=src[1])
Example #11
def branch_and_bound(n, A, B, c):
    from queue import PriorityQueue
    x = Variable(n)
    z = Variable(n)
    L = Parameter(n)
    U = Parameter(n)
    prob = Problem(Minimize(sum_squares(A*x + B*z - c)),
                   [L <= z, z <= U])
    visited = 0
    best_z = None
    f_best = numpy.inf
    nodes = PriorityQueue()
    nodes.put((numpy.inf, 0, numpy.zeros(n), numpy.ones(n), 0))
    while not nodes.empty():
        visited += 1
        # Evaluate the node with the lowest lower bound.
        _, _, L_val, U_val, idx = nodes.get()
        L.value = L_val
        U.value = U_val
        lower_bound = prob.solve()
        z_star = numpy.round(z.value)
        upper_bound = Problem(prob.objective, [z == z_star]).solve()
        f_best = min(f_best, upper_bound)
        if upper_bound == f_best:
            best_z = z_star
        # Add new nodes if not at a leaf and the branch cannot be pruned.
        if idx < n and lower_bound < f_best:
            for i in [0, 1]:
                L_val[idx] = U_val[idx] = i
                nodes.put((lower_bound, i, L_val.copy(), U_val.copy(), idx + 1))

    #print("Nodes visited: %s out of %s" % (visited, 2**(n+1)-1))
    return f_best, best_z
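
# Hedged usage sketch (not part of the original snippet): fit A*x + B*z to c
# with z restricted to {0, 1}^n by the branch-and-bound search above. The data
# below is hypothetical.
import numpy

numpy.random.seed(1)
m, n = 30, 8
A = numpy.random.randn(m, n)
B = numpy.random.randn(m, n)
c = numpy.random.randn(m)

f_best, z_best = branch_and_bound(n, A, B, c)
print("best objective:", f_best)
print("best binary assignment:", z_best)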
Example #12
    def test_parametric(self):
        """Test solve parametric problem vs full problem"""
        x = Variable()
        a = 10
        #  b_vec = [-10, -2., 2., 3., 10.]
        b_vec = [-10, -2.]

        for solver in self.solvers:

            print(solver)
            # Solve from scratch with no parameters
            x_full = []
            obj_full = []
            for b in b_vec:
                obj = Minimize(a * (x ** 2) + b * x)
                constraints = [0 <= x, x <= 1]
                prob = Problem(obj, constraints)
                prob.solve(solver=solver)
                x_full += [x.value]
                obj_full += [prob.value]

            # Solve parametric
            x_param = []
            obj_param = []
            b = Parameter()
            obj = Minimize(a * (x ** 2) + b * x)
            constraints = [0 <= x, x <= 1]
            prob = Problem(obj, constraints)
            for b_value in b_vec:
                b.value = b_value
                prob.solve(solver=solver)
                x_param += [x.value]
                obj_param += [prob.value]

            print(x_full)
            print(x_param)
            for i in range(len(b_vec)):
                self.assertItemsAlmostEqual(x_full[i], x_param[i], places=3)
                self.assertAlmostEqual(obj_full[i], obj_param[i])
Example #13
    def test_warm_start(self):
        """Test warm start.
        """
        m = 200
        n = 100
        np.random.seed(1)
        A = np.random.randn(m, n)
        b = Parameter(m)

        # Construct the problem.
        x = Variable(n)
        prob = Problem(Minimize(sum_squares(A * x - b)))

        b.value = np.random.randn(m)
        result = prob.solve(warm_start=False)
        result2 = prob.solve(warm_start=True)
        self.assertAlmostEqual(result, result2)
        b.value = np.random.randn(m)
        result = prob.solve(warm_start=True)
        result2 = prob.solve(warm_start=False)
        self.assertAlmostEqual(result, result2)
        pass
Example #14
	def test_lasso(self):
		# Solve the following consensus problem using ADMM:
		# Minimize sum_squares(A*x - b) + gamma*norm(x,1)
		
		# Problem data.
		m = 100
		n = 75
		np.random.seed(1)
		A = np.random.randn(m,n)
		b = np.random.randn(m)
		
		# Separate penalty from regularizer.
		x = Variable(n)
		gamma = Parameter(nonneg = True)
		funcs = [sum_squares(A*x - b), gamma*norm(x,1)]
		p_list = [Problem(Minimize(f)) for f in funcs]
		probs = Problems(p_list)
		
		# Solve via consensus.
		gamma.value = 1.0
		probs.solve(method = "consensus", rho_init = 1.0, max_iter = 50)
		print("Objective:", probs.value)
		print("Solution:", x.value)
Example #15
    def as_constraint(self, *args):
        """
        todo notes :
        performance goes with N=6 is around 1 second.
            when constraints are tightened, goes to 5-6 seconds
            overall worse than circle-BoundedSet, but more consistent results

            maybe
        """
        # centroids.shape[N, 2]
        cent = np.asarray(self.space.faces.centroids)
        centrx = Parameter(shape=cent[:, 0].shape,
                           value=cent[:, 0],
                           name='centrx')
        centry = Parameter(shape=cent[:, 1].shape,
                           value=cent[:, 1],
                           name='centry')
        bX, bY, bW, bH = self._boxlist.vars

        # base constraints - whatever for now
        C = [bW >= 1, bH >= 1]

        for i, face_set in enumerate(self._actions):
            X = face_set.vars  # selected faces
            M = 100  # todo upper bound
            cx = cvx.multiply(centrx, X)
            cy = cvx.multiply(centry, X)
            # todo: maybe linearize further, or use the true face min and face max instead of the centroid
            #
            # cx is within box if X_i = 1,
            C += [
                bX[i] - bW[i] / 2 <= cx - 0.4 + M * (1 - X),
                bX[i] + bW[i] / 2 >= cx + 0.4,
                bY[i] - bH[i] / 2 <= cy - 0.4 + M * (1 - X),
                bY[i] + bH[i] / 2 >= cy + 0.4,
            ]
        return C
Example #16
def compute_node_be_curvature(g, n=None, solver=None, solver_options={}, verbose=False):
    if n is not None:
        if g.degree[n] > 0:
            dgamma2 = construct_dgamma2(g, n, verbose)
            gammax = construct_gammax(g, n)

            dim_b1 = gammax.shape[0]
            dim_b2 = dgamma2.shape[0]
            dim_s2 = dgamma2.shape[0] - gammax.shape[0]

            if verbose:
                print('dim dgamma2 {0}; dim gammax {1}'.format(dim_b2, dim_b1))

            gammax_ext = np.block([
                [gammax, np.zeros((dim_b1, dim_s2))],
                [np.zeros((dim_b1, dim_s2)).T, np.zeros((dim_s2, dim_s2))],
            ])

            a = Parameter((dim_b2, dim_b2), value=dgamma2)
            b = Parameter((dim_b2, dim_b2), value=gammax_ext)
            kappa = Variable()
            constraints = [(a - kappa * b >> 0)]

            objective = Maximize(kappa)
            prob = Problem(objective, constraints)
            if verbose:
                print(prob.status)
            prob.solve(solver=solver, **solver_options)
            if verbose:
                print(prob.status)
            return prob.value
        else:
            return 0
    else:
        r = {n: compute_node_be_curvature(g, n, solver=solver, solver_options=solver_options, verbose=verbose) for n in g.nodes()}
    return r
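
# Hedged usage sketch (not part of the original snippet): curvature of every node
# of a small graph. Assumes networkx plus an SDP-capable solver such as SCS are
# installed, and that construct_dgamma2 / construct_gammax are importable
# alongside this function.
import networkx as nx

g = nx.path_graph(5)
curvatures = compute_node_be_curvature(g, solver='SCS')
print(curvatures)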
Example #17
def create_update(f):
    x = Variable(n)
    u = Parameter(n)

    def local_update(xbar):
        # Update u.
        if x.value is None:
            u.value = np.zeros(n)
        else:
            u.value += x.value - xbar
        # Update x.
        obj = f(x) + (rho / 2) * sum_squares(x - xbar + u)
        Problem(Minimize(obj)).solve()
        return x.value

    return local_update
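
# Hedged driver sketch (not part of the original snippet): run consensus ADMM
# with one local update per objective term in a hypothetical list f_list,
# averaging the local solutions each iteration (n and rho are the same module
# globals the closure above relies on).
import numpy as np

updates = [create_update(f_i) for f_i in f_list]
xbar = np.zeros(n)
for _ in range(50):
    xs = [update(xbar) for update in updates]
    xbar = sum(xs) / len(xs)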
Example #18
    def as_constraint(self, *args):
        """ for the faces of self.space, each partitioning X must have atleast
            2 adjacent faces of same selection,

            or if X is 0, then
            +---+---+---+
            |   | 0 |   |
            +---+---+---+
            | A | A | 0 |
            +---+---+---+
            | A | A |   |
            +---+---+---+
        self._actions is a Variable of size 'space.num_faces'
        """
        N = len(self.space.faces)
        M = np.zeros((N, N), dtype=int)
        for k, vs in self.space.faces.to_faces.items():
            M[list(vs), k] = 1

        M = Parameter(shape=M.shape,
                      value=M,
                      symmetric=True,
                      name='face_adj_mat')
        # print(M)
        C = []
        for action in self._actions:
            # [ N, N ] @ [N, 1] -> N
            # 2 versions of this tested ->
            # v1 - use the inverse directly to meet the constraint; this is empirically slower
            # this enforces connectivity
            C += [
                self._c <= M @ action.vars + (self._c + 1) * (1 - action.vars)
            ]

            # ------------------
            # v2 - use a slack variable either X or V
            # v = Variable(shape=N, boolean=True, name='indicator.{}.{}'.format(self.name, action.name))
            # C += [
            #     1 <= action.vars + v,
            #     self._adj_lim <= M @ action.vars + 4 * v,
            #     # self._adj_lim <= M @ (1 - action.vars)
            # ]
        return C
Example #19
from multiprocessing import Pool

import cvxopt
import numpy
from pylab import figure, show

from cvxpy import Maximize, Parameter, Problem, Variable, norm2, square

num_assets = 100
num_factors = 20

mu = cvxopt.exp(cvxopt.normal(num_assets))
F = cvxopt.normal(num_assets, num_factors)
D = cvxopt.spdiag(cvxopt.uniform(num_assets))
x = Variable(num_assets)
gamma = Parameter(nonneg=True)

expected_return = mu.T * x
variance = square(norm2(F.T * x)) + square(norm2(D * x))

# construct portfolio optimization problem *once*
p = Problem(Maximize(expected_return - gamma * variance),
            [sum(x) == 1, x >= 0])


# encapsulate the allocation function
def allocate(gamma_value):
    gamma.value = gamma_value
    p.solve()
    w = x.value
    expected_return, risk = mu.T * w, w.T * (F * F.T + D * D) * w
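
# Hedged usage sketch (not part of the original snippet, which is cut off above):
# sweep the risk-aversion parameter and record the resulting allocations,
# reading the weights back from x.value after each call. gamma_values is
# hypothetical.
import numpy as np

gamma_values = np.logspace(-1, 2, num=10)
allocations = []
for g in gamma_values:
    allocate(g)
    allocations.append(np.asarray(x.value).flatten())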
Example #20
    R = 0.1 * sparse.eye(1)

    # Initial and reference states
    x0 = np.array([0.1, 0.2])  # initial state
    # Reference input and states
    pref = 7.0
    vref = 0
    xref = np.array([pref, vref])  # reference state

    # Prediction horizon
    Np = 20

    # Define problem
    u = Variable((nu, Np))
    x = Variable((nx, Np + 1))
    x_init = Parameter(nx)
    objective = 0
    constraints = [x[:, 0] == x_init]
    for k in range(Np):
        objective += quad_form(x[:, k] - xref, Q) + quad_form(
            u[:, k], R)  # objective function
        constraints += [x[:, k + 1] == Ad @ x[:, k] + Bd @ u[:, k]
                        ]  # system dynamics constraint
        constraints += [xmin <= x[:, k],
                        x[:, k] <= xmax]  # state interval constraint
        constraints += [umin <= u[:, k],
                        u[:, k] <= umax]  # input interval constraint
    objective += quad_form(x[:, Np] - xref, QN)
    prob = Problem(Minimize(objective), constraints)

    # Simulate in closed loop
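
    # Hedged closed-loop sketch (not part of the original snippet): apply the
    # first input of the optimal plan at each step and roll the nominal model
    # forward, assuming Ad and Bd are the discrete-time matrices used in the
    # dynamics constraint above. nsim is hypothetical.
    nsim = 50
    xk = x0
    for t in range(nsim):
        x_init.value = xk
        prob.solve(warm_start=True)
        uk = u[:, 0].value
        xk = Ad @ xk + Bd @ uk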
Example #21
    def __init__(self, inputs, cvx_set, **kwargs):
        ObjectiveR2.__init__(self, inputs, **kwargs)
        shp = self.inputs.point_vars.shape
        self._cvx_set = Parameter(shape=shp,
                                  value=np.tile(cvx_set, (shp[0], 1)))
Example #22
    Qy = np.diag([20])   # or sparse.diags([])
    QyN = np.diag([20])  # final cost
    #Qg = 0.01 * np.eye(1)
    QDg = 0.5 * sparse.eye(1)  # Quadratic cost for Du0, Du1, ...., Du_N-1

    # Initial and reference
    x0 = np.array([0.0, 0.0])  # initial state
    r = 1.0  # Reference output

    # Prediction horizon
    Np = 40

    # Define problem
    u = Variable((nu, Np))
    x = Variable((nx, Np + 1))
    x_init = Parameter(nx)
    uminus1 = Parameter(nu)  # input at time instant negative one (from previous MPC window or uinit in the first MPC window)

    objective = 0
    constraints = [x[:, 0] == x_init]
    y = Cd @ x
    for k in range(Np):
        if k > 0:
            objective += quad_form(u[:, k] - u[:, k - 1], QDg)  # \sum_{k=1}^{N_p-1} (uk - u_k-1)'QDu(uk - u_k-1)
        else:  # at k = 0...
#            if uminus1[0] is not np.nan:  # if there is an uold to be considered
            objective += quad_form(u[:, k] - uminus1, QDg)  # ... penalize the variation of u0 with respect to uold

        objective += quad_form(y[:, k] - r, Qy)
        #objective += quad_form(u[:, k], Qg)  # objective function
Example #23
def calculate_portfolio(cvxtype, returns_function, long_only, exp_return,
                        selected_solver, max_pos_size, ticker_list):
    """ Variables:
        mu is the vector of expected returns.
        sigma is the covariance matrix.
        gamma is a Parameter that trades off risk and return.
        x is a vector of stock holdings as fractions of total assets.
    """
    assert cvxtype in ['minimize_risk', 'maximize_return']

    gamma = Parameter(nonneg=True)
    gamma.value = 1
    returns, stocks, betas = returns_function

    cov_mat = returns.cov()
    Sigma = cov_mat.values # np.asarray(cov_mat.values) 
    w = Variable(len(cov_mat))  # #number of stocks for portfolio weights
    risk = quad_form(w, Sigma)  #expected_variance => w.T*C*w =  quad_form(w, C)
    # num_stocks = len(cov_mat)
    
    if cvxtype == 'minimize_risk':  # Minimize portfolio risk / portfolio variance
        if long_only:
            prob = Problem(Minimize(risk), [sum(w) == 1, w >= 0])  # Long only
        else:
            prob = Problem(Minimize(risk), [sum(w) == 1])  # Long / short
    
    elif cvxtype == 'maximize_return': # Maximize portfolio return given required level of risk
        #mu  #Expected return for each instrument
        #expected_return = mu*x
        #risk = quad_form(x, sigma)
        #objective = Maximize(expected_return - gamma*risk)
        #p = Problem(objective, [sum_entries(x) == 1])
        #result = p.solve()

        mu = np.array([exp_return]*len(cov_mat)) # mu is the vector of expected returns.
        expected_return = np.reshape(mu,(-1,1)).T * w  # w is a vector of stock holdings as fractions of total assets.   
        objective = Maximize(expected_return - gamma*risk) # Maximize(expected_return - expected_variance)
        if long_only:
            constraints = [sum(w) == 1, w >= 0]
        else: 
            #constraints=[sum_entries(w) == 1,w <= max_pos_size, w >= -max_pos_size]
            constraints=[sum(w) == 1]
        prob = Problem(objective, constraints)

    prob.solve(solver=selected_solver)
    
    weights = []

    for weight in w.value:
        weights.append(float(weight))
        
    if cvxtype == 'maximize_return':
        optimal_weights = {"Optimal expected return":expected_return.value,
                        "Optimal portfolio weights":np.round(weights,2),
                        "tickers": ticker_list,
                        "Optimal risk": risk.value*100
                        }
        
    elif cvxtype == 'minimize_risk':
        optimal_weights = {"Optimal portfolio weights":np.round(weights,2),
                        "tickers": ticker_list,
                        "Optimal risk": risk.value*100
                        }   
    return optimal_weights
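
# Hedged usage sketch (not part of the original snippet): minimize portfolio
# variance over a few hypothetical tickers. The (returns, stocks, betas) tuple
# mirrors what the function unpacks above; passing selected_solver=None lets
# cvxpy pick its default solver. Assumes numpy, pandas and the cvxpy names used
# above are imported.
import numpy as np
import pandas as pd

tickers = ['AAA', 'BBB', 'CCC']
returns = pd.DataFrame(0.01 * np.random.randn(250, 3), columns=tickers)
result = calculate_portfolio('minimize_risk', (returns, tickers, None),
                             long_only=True, exp_return=0.01,
                             selected_solver=None, max_pos_size=0.5,
                             ticker_list=tickers)
print(result)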
Example #24
from queue import PriorityQueue

import numpy
from cvxpy import Minimize, Parameter, Problem, sum_squares, Variable

# Problem data.
m = 25
n = 20
numpy.random.seed(1)
A = numpy.matrix(numpy.random.randn(m, n))
b = numpy.matrix(numpy.random.randn(m, 1))
#b = A*numpy.random.uniform(-1, 1, size=(n, 1))

# Construct the problem.
x = Variable(n)
L = Parameter(n)
U = Parameter(n)
f = lambda x: sum_squares(A*x - b)
prob = Problem(Minimize(f(x)),
               [L <= x, x <= U])

visited = 0
best_solution = numpy.inf
best_x = 0
nodes = PriorityQueue()
nodes.put((numpy.inf, 0, -numpy.ones(n), numpy.ones(n), 0))
while not nodes.empty():
    visited += 1
    # Evaluate the node with the lowest lower bound.
    _, _, L_val, U_val, idx = nodes.get()
    L.value = L_val
Example #25
# Uses the Alternating Direction Method of Multipliers
# with a (non-convex) cardinality constraint.

# Generate data.
np.random.seed(1)
N = 50
M = 40
n = 10
data = []
for i in range(N):
    data += [(1, np.random.normal(1.0, 2.0, (n, 1)))]
for i in range(M):
    data += [(-1, np.random.normal(-1.0, 2.0, (n, 1)))]

# Construct problem.
gamma = Parameter(nonneg=True)
gamma.value = 0.1
# 'a' is a variable constrained to have at most 6 non-zero entries.
a = Card(n, k=6)
b = Variable()

slack = [pos(1 - label*(sample.T*a - b)) for (label, sample) in data]
objective = Minimize(norm(a, 2) + gamma*sum(slack))
p = Problem(objective)
# Extensions can attach new solve methods to the CVXPY Problem class.
p.solve(method="admm")

# Count misclassifications.
error = 0
for label, sample in data:
    if not label*(a.value.T*sample - b.value)[0] >= 0:
Example #26
# (a figure is generated)
#
# Let p_star(epsilon) be the optimal value of the following problem:
#               minimize    ||Ax + b + epsilon*d||_1
# Plots p_star(epsilon) versus epsilon and demonstrates the fact that it's
# affine on an interval that includes epsilon = 0.

# Input data
m = 6
n = 3
A = cvxopt.matrix(
    [-2, 7, 1, -5, -1, 3, -7, 3, -5, -1, 4, -4, 1, 5, 5, 2, -5, -1], (m, n))

b = cvxopt.matrix([-4, 3, 9, 0, -11, 5], (m, 1))
d = cvxopt.matrix([-10, -13, -27, -10, -7, 14], (m, 1))
epsilon = Parameter()

# The problem
x = Variable(n)
objective = Minimize(norm(A * x + b + epsilon * d, 1))
p = Problem(objective, [])


# Assign a value to gamma and find the optimal x
def get_p(e_value):
    epsilon.value = e_value
    result = p.solve()
    return result


# Range of epsilon values
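
# Hedged continuation sketch (the original snippet is cut off here): evaluate
# p_star over a range of epsilon values around zero; numpy is assumed to be
# imported as np.
e_values = np.linspace(-1.0, 1.0, num=41)
p_values = [get_p(e) for e in e_values]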
Example #27
    QDy = 10 * np.eye(ny)  # penalty on Delta y
    Qrg = 10 * np.eye(ny)
    QDg = 100 * sparse.eye(ny)  # Quadratic cost for Du0, Du1, ...., Du_N-1

    # Initial state, reference, command
    x0 = np.array(2 * [0.0, 0.0])  # initial state
    y0 = Cd @ x0  # initial state
    gm1 = np.array(2 *
                   [0.0])  # g at time -1, used for the constraint on Delta g

    # In[MPC Problem setup]
    g = Variable((ng, Np))
    x = Variable((nx, Np))
    eps_slack = Variable(ny)

    x_init = Parameter(nx)
    gminus1 = Parameter(
        ny
    )  # MPC command at time -1 (from previous MPC window or g_step_old for the first instant)
    yminus1 = Parameter(
        ny
    )  # system output at time -1 (from previous MPC window or y_step_old for the first instant)
    r = Parameter(ny)

    y = Cd @ x + Dd @ g  # system output definition
    objective = 0.0
    objective += quad_form(eps_slack, 1e4 *
                           np.eye(ny))  # constraint violation penalty on slack
    constraints = [x[:, 0] == x_init]  # initial state constraint
    constraints += [eps_slack >= 0.0]  # slack positive constraint
Example #28
# Taken from CVX website http://cvxr.com/cvx/examples/
# Exercise 5.1d: Sensitivity analysis for a simple QCQP
# Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan
# Original comments below

# Boyd & Vandenberghe, "Convex Optimization"
# Joelle Skaf - 08/29/05
# (a figure is generated)
#
# Let p_star(u) denote the optimal value of:
#           minimize    x^2 + 1
#               s.t.    (x-2)(x-2)<=u
# Finds p_star(u) and plots it versus u

u = Parameter()
x = Variable()

objective = Minimize(quad_form(x, 1) + 1)
constraint = [quad_form(x, 1) - 6 * x + 8 <= u]
p = Problem(objective, constraint)


# Assign a value to gamma and find the optimal x.
def get_x(u_value):
    u.value = u_value
    result = p.solve()
    return x.value


u_values = np.linspace(-0.9, 10, num=50)
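
# Hedged continuation sketch (the original snippet is cut off here): evaluate
# the optimal x for each value of u defined above.
x_values = [get_x(u_val) for u_val in u_values]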
Example #29
exact = 0.5 * np.sin(2 * np.pi * t / n) * np.sin(0.01 * t)
corrupt = exact + 0.05 * np.random.randn(len(exact))
corrupt = cvxopt.matrix(corrupt)

e = np.ones(n).T
ee = np.column_stack((-e, e)).T
D = sparse.spdiags(ee, range(-1, 1), n, n)
D = D.todense()
D = cvxopt.matrix(D)

# Solve in parallel
nopts = 10
lambdas = np.linspace(0, 50, nopts)
# Frame the problem with a parameter
lamb = Parameter(nonneg=True)
x = Variable(n)
p = Problem(Minimize(norm(x - corrupt) + norm(D * x) * lamb))


# For a value of lambda g, we solve the problem
# Returns [ ||Dx||_2 and ||x-x_cor||_2 ]
def get_value(g):
    lamb.value = g
    result = p.solve()
    return [np.linalg.norm(x.value - corrupt), np.linalg.norm(D * x.value)]


pool = Pool(processes=4)
# compute allocation in parallel
norms1, norms2 = zip(*pool.map(get_value, lambdas))
Example #30
    Qy = np.diag(2 * [20])  # or sparse.diags([])
    #QyN = np.diag(2*[20])  # final cost
    QDy = np.eye(ny)
    Qrg = 100 * np.eye(ny)
    QDg = 0.5 * sparse.eye(ny)  # Quadratic cost for Du0, Du1, ...., Du_N-1

    # Initial and reference
    x0 = np.array(2 * [0.0, 0.0])  # initial state

    # Prediction horizon
    Np = 40

    # Define problem
    g = Variable((ng, Np))
    x = Variable((nx, Np))
    x_init = Parameter(nx)
    gminus1 = Parameter(
        ny
    )  # MPC command at time -1 (from the previous MPC window, or the initial command in the first MPC window)
    yminus1 = Parameter(
        ny
    )  # system output at time -1 (from the previous MPC window, or the initial output in the first MPC window)
    r = Parameter(ny)

    objective = 0.0
    constraints = [x[:, 0] == x_init]
    y = Cd @ x + Dd @ g

    for k in range(Np):
        objective += quad_form(y[:, k] - r, Qy)  # tracking cost
        objective += quad_form(g[:, k] - r, Qrg)  # reference governor cost