Example #1
0
    def test_partial_optimize_numeric_fn(self):
        x, y = Variable(1), Variable(1)
        xval = 4

        # Solve the (simple) two-stage problem by "combining" the two stages (i.e., by solving a single linear program)
        p1 = Problem(Minimize(y), [xval + y >= 3])
        p1.solve()

        # Solve the two-stage problem via partial_optimize
        constr = [y >= -100]
        p2 = Problem(Minimize(y), [x + y >= 3] + constr)
        g = cvxpy.partial_optimize(p2, [y], [x])
        x.value = xval
        y.value = 42
        constr[0].dual_variable.value = 42
        result = g.value
        self.assertAlmostEqual(result, p1.value)
        self.assertAlmostEqual(y.value, 42)
        self.assertAlmostEqual(constr[0].dual_value, 42)

        # No variables optimized over.
        p2 = Problem(Minimize(y), [x + y >= 3])
        g = cvxpy.partial_optimize(p2, [], [x, y])
        x.value = xval
        y.value = 42
        p2.constraints[0].dual_variable.value = 42
        result = g.value
        self.assertAlmostEqual(result, y.value)
        self.assertAlmostEqual(y.value, 42)
        self.assertAlmostEqual(p2.constraints[0].dual_value, 42)
Example #2
0
 def test_pnorm(self):
     """ Test domain for pnorm.
     """
     dom = pnorm(self.a, -0.5).domain
     prob = Problem(Minimize(self.a), dom)
     prob.solve()
     self.assertAlmostEqual(prob.value, 0)
Example #3
0
    def test_value_at_risk(self):
        # Create problem data.
        n = numpy.random.randint(1,10)
        pbar = numpy.random.randn(n)
        Sigma = numpy.eye(n)
        p = NormalRandomVariable(pbar,Sigma)

        o = numpy.ones((n,1))
        beta = 0.05
        num_samples = 50

        # Create and solve optimization problem.
        x = Variable(n)
        p1 = Problem(Minimize(-x.T*pbar), [prob(-x.T*p >= 0, num_samples) <= beta, x.T*o == 1, x >= -0.1])
        p1.solve()

        # Create and solve analytic form of optimization problem (as a check).
        p2 = Problem(Minimize(-x.T*pbar),
                     [x.T*pbar >= scipy.stats.norm.ppf(1-beta) * norm2(sqrtm(Sigma) * x), x.T*o == 1, x >= -0.1])
        p2.solve()

        tol = 0.1
        self.assertLess(numpy.abs(p1.value - p2.value), tol)
Example #4
0
 def test_matrix_frac(self):
     """Test domain for matrix_frac.
     """
     dom = matrix_frac(self.x, self.A + np.eye(2)).domain
     prob = Problem(Minimize(sum_entries(diag(self.A))), dom)
     prob.solve(solver=cvxpy.SCS)
     self.assertAlmostEqual(prob.value, -2, places=3)
Example #5
0
def l1_solution(A, b, lam=0.5):
    N = A.shape[1]  # x must have one entry per column of A
    x = Variable(N)
    objective = Minimize(sum_entries(square(A * x - b)) + lam * norm(x, 1))
    constraints = []
    prob = Problem(objective, constraints)

    prob.solve()
    xhat = x.value
    return xhat
Example #6
0
    def test_partial_optimize_min_1norm(self):
        # Minimize the 1-norm in the usual way
        dims = 3
        x, t = Variable(dims), Variable(dims)
        p1 = Problem(Minimize(sum_entries(t)), [-t<=x, x<=t])

        # Minimize the 1-norm via partial_optimize
        g = partial_optimize(p1, [t], [x])
        p2 = Problem(Minimize(g))
        p2.solve()

        p1.solve()
        self.assertAlmostEqual(p1.value, p2.value)
Example #7
0
    def test_partial_optimize_simple_problem(self):
        x, y = Variable(1), Variable(1)

        # Solve the (simple) two-stage problem by "combining" the two stages (i.e., by solving a single linear program)
        p1 = Problem(Minimize(x+y), [x+y>=3, y>=4, x>=5])
        p1.solve()

        # Solve the two-stage problem via partial_optimize
        p2 = Problem(Minimize(y), [x+y>=3, y>=4])
        g = partial_optimize(p2, [y], [x])
        p3 = Problem(Minimize(x+g), [x>=5])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)
Example #8
0
    def test_partial_optimize_numeric_fn(self):
        x,y = Variable(1), Variable(1)
        xval = 4

        # Solve the (simple) two-stage problem by "combining" the two stages (i.e., by solving a single linear program)
        p1 = Problem(Minimize(y), [xval+y>=3])
        p1.solve()

        # Solve the two-stage problem via partial_optimize
        p2 = Problem(Minimize(y), [x+y>=3])
        g = partial_optimize(p2, [y], [x])
        result = g(x).numeric([xval])
        self.assertAlmostEqual(result, p1.value)
Example #9
0
    def test_partial_optimize_eval_1norm(self):
        # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
        dims = 3
        x, t = Variable(dims), Variable(dims)
        xval = [-5]*dims
        p1 = Problem(Minimize(sum_entries(t)), [-t<=xval, xval<=t])
        p1.solve()

        # Minimize the 1-norm via partial_optimize.
        p2 = Problem(Minimize(sum_entries(t)), [-t<=x, x<=t])
        g = partial_optimize(p2, [t], [x])
        p3 = Problem(Minimize(g(xval)), [])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)
Example #10
0
    def test_robust_svm(self):
        # Create problem data.
        m = 100                                                 # num train points
        m_pos = math.floor(m/2)
        m_neg = m - m_pos

        n = 2 # num dimensions
        mu_pos = 2*numpy.ones(n)
        mu_neg = -2*numpy.ones(n)
        sigma = 1
        X = numpy.matrix(numpy.vstack((mu_pos + sigma*numpy.random.randn(m_pos,n),
                                       mu_neg + sigma*numpy.random.randn(m_neg,n))))

        y = numpy.hstack((numpy.ones(m_pos), -1*numpy.ones(m_neg)))

        C = 1                                                   # regularization trade-off parameter
        ns = 50
        eta = 0.1

        # Create and solve optimization problem.
        w, b, xi = Variable(n), Variable(), NonNegative(m)

        constr = []
        Sigma = 0.1*numpy.eye(n)
        for i in range(m):
            mu = numpy.array(X[i])[0]
            x = NormalRandomVariable(mu, Sigma)
            chance = prob(-y[i]*(w.T*x+b) >= (xi[i]-1), ns)
            constr += [chance <= eta]

        p = Problem(Minimize(norm(w,2) + C*sum_entries(xi)),
                     constr)
        p.solve(verbose=True)

        w_new = w.value
        b_new = b.value

        # Create and solve the canonical SVM problem.
        constr = []
        for i in range(m):
            constr += [y[i]*(X[i]*w+b) >= (1-xi[i])]

        p2 = Problem(Minimize(norm(w,2) + C*sum_entries(xi)), constr)
        p2.solve()

        w_old = w.value
        b_old = b.value

        self.assert_feas(p)
Example #11
0
    def test_partial_optimize_eval_1norm(self):
        # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
        dims = 3
        x, t = Variable(dims), Variable(dims)
        xval = [-5]*dims
        p1 = Problem(cvxpy.Minimize(sum_entries(t)), [-t <= xval, xval <= t])
        p1.solve()

        # Minimize the 1-norm via partial_optimize.
        p2 = Problem(cvxpy.Minimize(sum_entries(t)), [-t <= x, x <= t])
        g = cvxpy.partial_optimize(p2, [t], [x])
        p3 = Problem(cvxpy.Minimize(g), [x == xval])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)

        # Minimize the 1-norm using maximize.
        p2 = Problem(cvxpy.Maximize(sum_entries(-t)), [-t <= x, x <= t])
        g = cvxpy.partial_optimize(p2, opt_vars=[t])
        p3 = Problem(cvxpy.Maximize(g), [x == xval])
        p3.solve()
        self.assertAlmostEqual(p1.value, -p3.value)

        # Try leaving out args.

        # Minimize the 1-norm via partial_optimize.
        p2 = Problem(cvxpy.Minimize(sum_entries(t)), [-t <= x, x <= t])
        g = cvxpy.partial_optimize(p2, opt_vars=[t])
        p3 = Problem(cvxpy.Minimize(g), [x == xval])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)

        # Minimize the 1-norm via partial_optimize.
        g = cvxpy.partial_optimize(p2, dont_opt_vars=[x])
        p3 = Problem(cvxpy.Minimize(g), [x == xval])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)

        with self.assertRaises(Exception) as cm:
            g = cvxpy.partial_optimize(p2)
        self.assertEqual(str(cm.exception),
                         "partial_optimize called with neither opt_vars nor dont_opt_vars.")

        with self.assertRaises(Exception) as cm:
            g = cvxpy.partial_optimize(p2, [], [x])
        self.assertEqual(str(cm.exception),
                         ("If opt_vars and new_opt_vars are both specified, "
                          "they must contain all variables in the problem.")
                         )
Example #12
0
def GetMinimalSpeedToReachEpsilonNeighbordhoodVector(dt, epsilon, W, dW, dF):
        Ndim = W.shape[0]
        Nsamples = W.shape[1]

        dist_w = np.zeros((Nsamples-1))
        for i in range(0,Nsamples-1):
                dist_w[i] = np.linalg.norm(W[:,i]-W[:,i+1])

        p = Variable(Nsamples-1)
        sM = Variable(Nsamples-1)

        constraints = []
        objfunc = 0.0
        for i in range(0,Nsamples-1):
                #constraints.append( norm(p*dt*dW0[0:2] + dF +np.dot(dw,np.array((1,0))) ) < epsilon )
                constraints.append( norm(p[i]*dt*dW[:,i] + dt*dt/2*dF[:,i] + np.dot(dist_w[i],np.array((1,0,0,0))) ) < epsilon )
                constraints.append( sM[i] >= p[i] )
                constraints.append( sM[i] >= 0.0)
                constraints.append( p[i] >= 0.0 )
                objfunc += norm(sM[i])

        objective = Minimize(objfunc)

        prob = Problem(objective, constraints)

        print "solve minimal speed"
        result = prob.solve(solver=SCS)
        print "done.(",prob.value,"|",np.min(sM.value),")"

        if prob.value < inf:
                return np.array(sM.value).flatten()
        else:
                return np.array(sM.value).flatten()
Example #13
0
    def test_partial_optimize_params(self):
        """Test partial optimize with parameters.
        """
        x, y = Variable(1), Variable(1)
        gamma = Parameter()
        # Solve the (simple) two-stage problem by "combining" the two stages (i.e., by solving a single linear program)
        p1 = Problem(Minimize(x+y), [x+y>=gamma, y>=4, x>=5])
        gamma.value = 3
        p1.solve()

        # Solve the two-stage problem via partial_optimize
        p2 = Problem(Minimize(y), [x+y>=gamma, y>=4])
        g = partial_optimize(p2, [y], [x])
        p3 = Problem(Minimize(x+g), [x>=5])
        p3.solve()
        self.assertAlmostEqual(p1.value, p3.value)
Example #14
0
    def test_yield_constr_cost_min(self):
        # Create problem data.
        n = 10
        c = numpy.random.randn(n)
        P, q, r = numpy.eye(n), numpy.random.randn(n), numpy.random.randn()
        mu, Sigma = numpy.zeros(n), 0.1*numpy.eye(n)
        omega = NormalRandomVariable(mu, Sigma)
        m, eta = 100, 0.95

        # Create and solve optimization problem.
        x = Variable(n)
        yield_constr = prob(quad_form(x+omega,P)
                        + (x+omega).T*q + r >= 0, m) <= 1-eta
        p = Problem(Minimize(x.T*c), [yield_constr])
        p.solve()
        self.assert_feas(p)
Example #15
0
def ForceCanBeCounteractedByAccelerationVector(dt, Fp, u1min, u1max, u2min, u2max, plot=False) :

        ### question (1) : can we produce an acceleration ddW, such that it counteracts F?

        ## dynamics projected onto identity element, it becomes obvious that in an infinitesimal neighborhood, 
        ## we can only counteract forces along the x and the theta axes due to non-holonomicity

        dt2 = dt*dt/2

        ## span dt2-hyperball in Ndim
        F = dt2*Fp
        thetamin = dt2*u2min
        thetamax = dt2*u2max
        xmin = 0.0
        xmax = dt2*u1max

        Xlow = np.dot(np.dot(Rz(-pi/2),Rz(thetamin)),np.array((1,0,0)))
        Xhigh = np.dot(np.dot(Rz(pi/2),Rz(thetamax)),np.array((1,0,0)))

        Ndim = Fp.shape[0]
        if Fp.ndim <= 1:
                Nsamples = 1
        else:
                Nsamples = Fp.shape[1]
        p = Variable(3,Nsamples)

        constraints = []
        objfunc = 0.0
        for i in range(0,Nsamples):
                constraints.append( norm(p[:,i]) <= xmax )
                constraints.append( np.matrix(Xlow[0:3])*p[:,i] <= 0 )
                constraints.append( np.matrix(Xhigh[0:3])*p[:,i] <= 0 )
                if Fp.ndim <= 1:
                        objfunc += norm(p[:,i]-F[0:3])
                else:
                        objfunc += norm(p[:,i]-F[0:3,i])
                #objfunc.append(norm(p[:,i]-F[:,i]))

        objective = Minimize(objfunc)
        prob = Problem(objective, constraints)

        result = prob.solve(solver=SCS, eps=1e-7)

        #nearest_ddq = np.array(p.value)
        nearest_ddq = np.array(p.value/dt2)

        codimension = Ndim-nearest_ddq.shape[0]

        #print Ndim, nearest_ddq.shape
        #print codimension
        zero_rows = np.zeros((codimension,Nsamples))

        if nearest_ddq.shape[0] < Ndim:
                nearest_ddq = np.vstack((nearest_ddq,zero_rows))

        if plot:
                PlotReachableSetForceDistance(dt, u1min, u1max, u2min, u2max, -F, dt2*nearest_ddq)

        return nearest_ddq
Example #16
0
    def test_partial_problem(self):
        """Test grad for partial minimization/maximization problems.
        """
        for obj in [Minimize((self.a)**-1), Maximize(entr(self.a))]:
            prob = Problem(obj, [self.x + self.a >= [5, 8]])
            # Optimize over nothing.
            expr = cvxpy.partial_optimize(prob, dont_opt_vars=[self.x, self.a])
            self.a.value = None
            self.x.value = None
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], None)
            self.assertAlmostEqual(grad[self.x], None)
            # Outside domain.
            self.a.value = 1.0
            self.x.value = [5, 5]
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], None)
            self.assertAlmostEqual(grad[self.x], None)

            self.a.value = 1
            self.x.value = [10, 10]
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a])
            self.assertItemsAlmostEqual(grad[self.x].todense(), [0, 0, 0, 0])

            # Optimize over x.
            expr = cvxpy.partial_optimize(prob, opt_vars=[self.x])
            self.a.value = 1
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a] + 0)

            # Optimize over a.
            fix_prob = Problem(obj, [self.x + self.a >= [5, 8], self.x == 0])
            fix_prob.solve()
            dual_val = fix_prob.constraints[0].dual_variable.value
            expr = cvxpy.partial_optimize(prob, opt_vars=[self.a])
            self.x.value = [0, 0]
            grad = expr.grad
            self.assertItemsAlmostEqual(grad[self.x].todense(), dual_val)

            # Optimize over x and a.
            expr = cvxpy.partial_optimize(prob, opt_vars=[self.x, self.a])
            grad = expr.grad
            self.assertAlmostEqual(grad, {})
Example #17
0
def test_choose():
    """Test choose variable."""
    x = Variable((5, 4))
    y = Choose((5, 4), k=4)
    p = Problem(Minimize(sum(1 - x) + sum(x)), [x == y])
    result = p.solve(**solve_args)
    assert result[0] == approx(20)
    for v in np.nditer(x.value):
        assert v * (1 - v) == approx(0)
    assert x.value.sum() == approx(4)
Example #18
0
def mcFrobSolveLeftFactor_cvx(V, M_Omega, mask, **kwargs):
    """
    mcFrobSolveLeftFactor_cvx(V, M_Omega, mask, **kwargs)
    A solver for the left factor, U, in the problem
        min FrobNorm( P_Omega(U * V.T - M) )
    where U is an m-by-r matrix, V an n-by-r matrix.
    M_Omega is the set of observed entries in matrix form, while
    mask is a Boolean array with 1/True-valued entries corresponding 
    to those indices that were observed.

    This problem is solved using the CVXPY package (and is
    thus likely to be slower than a direct iterative
    least-squares solver).
    """
    # Options
    returnObjectiveValue = kwargs.get('returnObjectiveValue', False)
    solver = kwargs.get('solver', SCS)
    verbose = kwargs.get('verbose', False)

    if isinstance(verbose, int):
        if verbose > 1:
            verbose = True
        else:
            verbose = False

    # Parameters
    m = mask.shape[0]
    if V.shape[0] < V.shape[1]:
        # make sure V_T is "short and fat"
        V = V.T
    r = V.shape[1]

    Omega_i, Omega_j = matIndicesFromMask(mask)

    # Problem
    U = Variable((m, r))  # shape tuple, matching the cvxpy 1.x API used below
    obj = Minimize(cvxnorm(cvxvec((U @ V.T)[Omega_i, Omega_j]) - M_Omega))
    prob = Problem(obj)
    prob.solve(solver=solver, verbose=verbose)
    if returnObjectiveValue:
        return (U.value, prob.value)
    else:
        return U.value
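A minimal usage sketch for mcFrobSolveLeftFactor_cvx with made-up data; it assumes the function, its helper matIndicesFromMask, and its module-level imports (numpy and the cvxpy names used above) are in scope.

import numpy as np

# Hypothetical matrix-completion data, for illustration only.
m, n, r = 20, 15, 3
M = np.random.randn(m, r) @ np.random.randn(r, n)   # rank-r ground-truth matrix
mask = np.random.rand(m, n) < 0.5                   # Boolean pattern of observed entries
Omega_i, Omega_j = matIndicesFromMask(mask)         # helper assumed from the same module
M_Omega = M[Omega_i, Omega_j]                       # observed entries as a vector

V0 = np.random.randn(n, r)                          # current right factor
U_hat = mcFrobSolveLeftFactor_cvx(V0, M_Omega, mask, verbose=False)
print(U_hat.shape)                                  # expected: (m, r)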
Example #19
0
    def test_simple_problem(self):
        # Create problem data.
        n = numpy.random.randint(1,10)
        eta = 0.95
        num_samples = 10

        c = numpy.random.rand(n,1)

        mu = numpy.zeros(n)
        Sigma = numpy.eye(n)
        a = NormalRandomVariable(mu, Sigma)

        b = numpy.random.randn()

        # Create and solve optimization problem.
        x = Variable(n)
        p = Problem(Maximize(x.T*c), [prob(max_entries(x.T*a-b) >= 0, num_samples) <= 1-eta])
        p.solve()
        self.assert_feas(p)
Example #20
0
def calc_Koopman(Yf, Yp, flag=1):
    solver_instance = cvxpy.CVXOPT
    #solver_instance = cvxpy.ECOS;
    if flag == 1:  # moore penrose inverse, plain ol' least squares Koopman
        #Yp_inv = np.dot(np.transpose(Yp_final), np.linalg.inv( np.dot(Yp_final,np.transpose(Yp_final)) )   );
        Yp_inv = np.linalg.pinv(Yp)
        K = np.dot(Yf, Yp_inv)

    if flag == 2:  # cvx optimization approach - L2 + L1 lasso
        norm1_term = 0.0
        all_col_handles = [None] * Yf.shape[0]
        for i in range(0, Yf.shape[0]):
            all_col_handles[i] = Variable(Yf.shape[0], 1)
            norm1_term = norm1_term + norm2(all_col_handles[i])

        operator = all_col_handles[0]
        for i in range(1, Yf.shape[0]):
            operator = cvxpy.hstack(operator, all_col_handles[i])

        print "[INFO]: CVXPY Koopman operator variable: " + repr(operator)
        print "[INFO]: Yf.shape in calc_Koopman: " + repr(Yf.shape)
        norm2_fit_term = norm2(norm2(Yf - operator * Yp, axis=0))
        objective = Minimize(norm2_fit_term + norm1_term)
        constraints = []
        prob = Problem(objective, constraints)
        result = prob.solve(verbose=True, solver=solver_instance)
        print "[INFO]: Finished executing cvx solver, printing CVXPY problem status"
        print(prob.status)
        K = operator.value

    if flag == 3:
        operator = Variable(Yf.shape[0], Yf.shape[0])
        objective = Minimize(cvxpynorm(operator, 2))
        constraints = [
            cvxpynorm(Yf - operator * Yp, 'fro') / cvxpynorm(Yf, 'fro') < 0.01
        ]
        prob = Problem(objective, constraints)
        result = prob.solve(verbose=True)  #(solver=solver_instance);
        print(prob.status)
        K = operator.value

    return K
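A small usage sketch for the least-squares mode (flag=1) of calc_Koopman, with made-up snapshot matrices; it assumes calc_Koopman and its imports (numpy, cvxpy) are available.

import numpy as np

# Hypothetical snapshot matrices: columns are successive observable vectors.
Yp = np.random.randn(5, 100)       # "past" snapshots
Yf = np.random.randn(5, 100)       # "future" snapshots
K = calc_Koopman(Yf, Yp, flag=1)   # flag=1: plain pseudoinverse least-squares fit
print(K.shape)                     # expected: (5, 5)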
Example #21
0
def test_card():
    """Test card variable."""
    x = Card(5, k=3, M=1)
    p = Problem(Maximize(sum(x)), [x <= 1, x >= 0])
    result = p.solve(**solve_args)

    assert result[0] == approx(3)
    for v in np.nditer(x.value):
        assert v * (1 - v) == approx(0)
    assert x.value.sum() == approx(3)

    # Should be equivalent to x == choose.
    x = Variable((5, 4))
    c = Choose((5, 4), k=4)
    b = Boolean((5, 4))
    p = cp.Problem(Minimize(sum(1 - x) + sum(x)), [x == c, x == b])
    result = p.solve(**solve_args)
    assert result[0] == approx(20)
    for v in np.nditer(x.value):
        assert v * (1 - v) == approx(0)
Example #22
0
    def test_simple_problem(self):
        # Create problem data.
        n = numpy.random.randint(1, 10)
        eta = 0.95
        num_samples = 10

        c = numpy.random.rand(n, 1)

        mu = numpy.zeros(n)
        Sigma = numpy.eye(n)
        a = NormalRandomVariable(mu, Sigma)

        b = numpy.random.randn()

        # Create and solve optimization problem.
        x = Variable(n)
        p = Problem(
            Maximize(x.T * c),
            [prob(max_entries(x.T * a - b) >= 0, num_samples) <= 1 - eta])
        p.solve()
        self.assert_feas(p)
Example #23
0
    def test_partial_problem(self):
        """Test domain for partial minimization/maximization problems.
        """
        for obj in [Minimize((self.a)**-1), Maximize(log(self.a))]:
            prob = Problem(obj, [self.x + self.a >= [5, 8]])
            # Optimize over nothing.
            expr = cvxpy.partial_optimize(prob, dont_opt_vars=[self.x, self.a])
            dom = expr.domain
            constr = [self.a >= -100, self.x >= 0]
            prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
            prob.solve()
            self.assertAlmostEqual(prob.value, 13)
            assert self.a.value >= 0
            assert np.all((self.x + self.a - [5, 8]).value >= -1e-3)

            # Optimize over x.
            expr = cvxpy.partial_optimize(prob, opt_vars=[self.x])
            dom = expr.domain
            constr = [self.a >= -100, self.x >= 0]
            prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
            prob.solve()
            self.assertAlmostEqual(prob.value, 0)
            assert self.a.value >= 0
            self.assertItemsAlmostEqual(self.x.value, [0, 0])

            # Optimize over x and a.
            expr = cvxpy.partial_optimize(prob, opt_vars=[self.x, self.a])
            dom = expr.domain
            constr = [self.a >= -100, self.x >= 0]
            prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
            prob.solve()
            self.assertAlmostEqual(self.a.value, -100)
            self.assertItemsAlmostEqual(self.x.value, [0, 0])
Example #24
0
 def test_indicator(self):
     """Test indicator transform.
     """
     cons = [self.a >= 0, self.x == 2]
     obj = cvxpy.Minimize(self.a - sum_entries(self.x))
     expr = cvxpy.indicator(cons)
     assert expr.is_convex()
     assert expr.is_positive()
     prob = Problem(Minimize(expr) + obj)
     result = prob.solve()
     self.assertAlmostEqual(-4, result)
     self.assertAlmostEqual(0, self.a.value)
     self.assertItemsAlmostEqual([2, 2], self.x.value)
     self.assertAlmostEqual(0, expr.value)
     self.a.value = -1
     self.assertAlmostEqual(np.inf, expr.value)
Example #25
0
    def test_geo_mean(self):
        """Test domain for geo_mean
        """
        dom = geo_mean(self.x).domain
        prob = Problem(Minimize(sum_entries(self.x)), dom)
        prob.solve()
        self.assertAlmostEqual(prob.value, 0)

        # No special case for only one weight.
        dom = geo_mean(self.x, [0, 2]).domain
        dom.append(self.x >= -1)
        prob = Problem(Minimize(sum_entries(self.x)), dom)
        prob.solve()
        self.assertItemsAlmostEqual(self.x.value, [-1, 0])

        dom = geo_mean(self.z, [0, 1, 1]).domain
        dom.append(self.z >= -1)
        prob = Problem(Minimize(sum_entries(self.z)), dom)
        prob.solve()
        self.assertItemsAlmostEqual(self.z.value, [-1, 0, 0])
Example #26
0
def WaypointsToWeights(waypts):
        ## fit a polynomial from a set of basis functions to estimate a N-dim curve

        Ndim = waypts.shape[0]
        Nsamples = waypts.shape[1]

        #######################################################################
        ## discretization of trajectory
        #######################################################################
        M = 500 ## points on precomputed functions
        K = 500  ## number of precomputed basis functions
        plotFunctionalSpace = True
        #######################################################################

        if M < Nsamples:
                print("ERROR: more waypoints than discretization, abort")
                sys.exit(0)

        constraints = []
        print(np.around(waypts, 2))

        ##### FUNC SPACE CONSTRAINTS
        T = np.linspace(0.0,1.0,M)
        F = Fpoly(T,K)
        dF = dFpoly(T,K)
        #F = Fchebyshev(T,K)
        #dF = dFchebyshev(T,K)

        print(np.around(F, decimals=2))
        Weights = Variable(K,Ndim)

        if plotFunctionalSpace:
                plt.title('Basis Functions')
                Kp = min(10,K)
                print(T.shape, F.shape)
                for i in range(0,Kp):
                        plt.subplot(Kp, 1, i+1)  # subplot indices start at 1
                        plot(T,F[i,:],'-r',markersize=5)        
                        plt.ylabel(i)
                plt.show()
        #print np.around(F,decimals=2)
        #sys.exit(0)

        dw = 1.0/float(Nsamples-1)
        ctr=0
        Twpt = np.zeros((Nsamples,1))
        
        for i in range(0,Nsamples):
                tidx = find_nearest_idx(T,i*dw)
                Twpt[ctr]=tidx
                ctr=ctr+1
                Ftmp = np.reshape(F[:,tidx],(K,1))
                constraints.append(norm(waypts[:,i] - Weights.T*Ftmp) <= 0.01)
                #constraints.append(waypts[:,i] == Weights.T*Ftmp)

        ## add smoothing condition
        for t in T[1:]:
                tidx = find_nearest_idx(T,t)
                Ftmp0 = np.reshape(F[:,tidx-1],(K,1))
                Ftmp1 = np.reshape(F[:,tidx],(K,1))
                constraints.append(norm(Weights.T*Ftmp0 - Weights.T*Ftmp1) <= 0.01)

        if plotFunctionalSpace:
                plt.title('Waypoints')
                plt.subplot(3, 1, 1)
                plot(Twpt,waypts[0,:].flatten(),'ok',markersize=10)        
                plt.ylabel('X')
                plt.subplot(3, 1, 2)
                plot(Twpt,waypts[1,:].flatten(),'ok',linewidth=3,markersize=10)        
                plt.ylabel('Y')
                plt.subplot(3, 1, 3)
                plot(Twpt,waypts[2,:].flatten(),'ok',linewidth=3,markersize=10)
                plt.ylabel('Z')
                plt.show()

        objective = Minimize(norm(Weights,1))
        prob = Problem(objective, constraints)

        #ECOS, ECOS_BB, CVXOPT, SCS
        #result = prob.solve(solver=SCS, use_indirect=True, eps=1e-2, verbose=True)
        #prob.solve(verbose=True, abstol_inacc=1e-2,reltol_inacc=1e-2,max_iters= 300, reltol=1e-2)
        result = prob.solve(solver=SCS, verbose=True)

        if plotFunctionalSpace:
                Y = np.zeros((M,Ndim))
                ctr=0
                for t in T:
                        tidx = find_nearest_idx(T,t)
                        Ftmp = np.reshape(F[:,tidx],(K,1))
                        WF = Weights.T.value*Ftmp
                        Y[ctr,0] = WF[0]
                        Y[ctr,1] = WF[1]
                        Y[ctr,2] = WF[2]
                        ctr=ctr+1
                plt.title('Waypoints')
                plt.subplot(3, 1, 1)
                plot(Twpt,waypts[0,:].flatten(),'ok',markersize=10)        
                plot(Y[:,0].flatten(),'or',markersize=3)        
                plt.ylabel('X')
                plt.subplot(3, 1, 2)
                plot(Twpt,waypts[1,:].flatten(),'ok',linewidth=3,markersize=10)        
                plot(Y[:,1].flatten(),'or',linewidth=3,markersize=3)        
                plt.ylabel('Y')
                plt.subplot(3, 1, 3)
                plot(Twpt,waypts[2,:].flatten(),'ok',linewidth=3,markersize=10)
                plot(Y[:,2].flatten(),'or',linewidth=3,markersize=3)        
                plt.ylabel('Z')

                plt.show()

        if prob.status != OPTIMAL:
                print("ERROR: infeasible cvx program")
                sys.exit(0)

        return [Weights.value,T,F,dF]
Example #27
0
#qp example
from cvxopt import matrix, solvers
from cvxopt.solvers import qp
from cvxpy import Variable
Q = matrix([[1.0,-1/2], [-1/2,2]])
f = matrix([-1.0,0])
A = matrix([[1.0,2],[1,-4],[5,76]])
b = matrix([-2.0,-3,1])

sol = qp(Q,f,A.T,b,None,None)
print(sol['x'])

from cvxpy import Minimize, Problem, norm2
import numpy as np

# cholesky factor of Q
L = matrix(np.linalg.cholesky(Q))
x = Variable(2,1)
objective = Minimize(norm2(L*x)+f.T*x)
constraints = [A.T*x <= b]
pro1 = Problem(objective, constraints)
print(pro1.solve())
print(x.value)


# perturbed version of QP

Example #28
0
def create_schedule(n_days, inventory_start,
                    n_totes_washed_start, pars=None,
                    do_plot=True, verbose=True):
    """
    Demo an optimal supply chain scheduling with variable
    labor costs, and the concept of totes that hold a number of
    products. Totes need to be cleaned on a regular basis.
    :param pars: parameters from create_default_params
    :param do_plot: True if you want a plot created (default)
    :return: None
    """
    if pars is None:
        pars = create_default_params()

    days = np.arange(n_days)

    print('creating demand')
    demand = create_demand(days)
    labor_costs = get_labor_costs(days, pars)

    # define variables which keep track of
    # production, inventory and number of totes washed per day

    print('defining variables')
    production = Variable(n_days)
    sales = Variable(n_days)
    inventory = Variable(n_days)
    n_totes_washed = Variable(n_days)

    print('calculating costs and profit')
    # calculate when the totes that were washed become dirty again
    shift_matrix = mu.time_shift_matrix(n_days,
                                        pars['days_until_cleaning'])

    n_totes_become_dirty = (shift_matrix*n_totes_washed)[:n_days]

    # calculate the number of clean totes on any day
    cum_matrix = mu.cumulative_matrix(n_days)

    n_washed_totes_available = n_totes_washed_start \
        + cum_matrix*(n_totes_washed - n_totes_become_dirty)

    print('calculating total cost')

    # Minimize total cost which is
    # sum of labor costs, storage costs and washing costs

    total_cost = production.T*labor_costs + \
                 pars['storage_cost'] * sum(inventory) + \
                 pars['washing_tote_cost'] * sum(n_totes_washed)

    total_profit = pars['sales_price']*sum(sales)-total_cost

    print('defining objective')
    objective = Maximize(total_profit)

    # Subject to these constraints

    constraints = make_constraints(production, sales, inventory, pars,
                                   n_washed_totes_available,
                                   n_totes_washed, demand, inventory_start)

    # define the problem and solve it

    problem = Problem(objective, constraints)

    solver = 'cvxpy'
    print('solving with: %s' % solver)
    start = time()
    problem.solve(verbose=verbose)

    finish = time()
    run_time = finish - start
    print('Solve time: %s seconds' % run_time)

    print "Status: %s" % problem.status
    if problem.status == 'infeasible':
        print "Problem is infeasible, no solution found"
        return

    n_items_sold = sum(sales.value)
    total_cost = problem.value
    total_washing_cost = pars['washing_tote_cost']*sum(n_totes_washed.value)
    total_labor_cost = (production.T*labor_costs).value
    total_storage_cost = sum(inventory.value)*pars['storage_cost']
    total_cost_per_item = problem.value/n_items_sold

    print "Total cost: %s" % total_cost
    print "Total labor cost: %s" % total_labor_cost
    print "Total washing cost: %s" % total_washing_cost
    print "Total storage cost: %s" % total_storage_cost
    print "Total cost/item: %s" % total_cost_per_item
    print "Total profit: %s" % total_profit.value

    if do_plot:
        plot_variables(days, production, inventory, sales, demand,
                       n_washed_totes_available)
        plt.clf()
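A hypothetical driver for create_schedule; it assumes the module's helper functions (create_default_params, create_demand, get_labor_costs, make_constraints, plot_variables and the mu utilities) are importable alongside it.

if __name__ == '__main__':
    # Made-up starting conditions, for illustration only.
    create_schedule(n_days=60,
                    inventory_start=20.0,
                    n_totes_washed_start=15.0,
                    do_plot=False,
                    verbose=False)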
Example #29
0
cvxopt.solvers.options['show_progress'] = False

# create problem data
m, n = 5, 10
A = cvxopt.normal(m,n)
tmp = cvxopt.uniform(n,1)
b = A*tmp

x = Variable(n)

p = Problem(
    Minimize(-sum(log(x))),
    [A*x == b]
)
status = p.solve()
cvxpy_x = x.value

def acent(A, b):
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, cvxopt.matrix(1.0, (n,1))
        if min(x) <= 0.0: return None
        f = -sum(cvxopt.log(x))
        Df = -(x**-1).T
        if z is None: return f, Df
        H = cvxopt.spdiag(z[0] * x**-2)
        return f, Df, H
    sol = cvxopt.solvers.cp(F, A=A, b=b)
    return sol['x'], sol['primal objective']
Example #30
0
from cvxpy import Minimize, Variable, Problem, max, abs, sum
import numpy as np

n = 3
N = 30
A = np.matrix([[-1, 0.4, 0.8], [1, 0, 0], [0, 1, 0]])
b = np.matrix([1, 0, 0.3]).T
x0 = np.zeros((n, 1))
xdes = np.matrix([7, 2, -6]).T
x = Variable(n, N + 1)
u = Variable(1, N)
objective = Minimize(sum(max(abs(u), 2 * abs(u) - 1)))
constraints1 = [x[:, 1 : N + 1] == A * x[:, 0:N] + b * u]
constraints2 = [x[:, 0] == x0]
constraints3 = [x[:, N] == xdes]
constraints = constraints1 + constraints2 + constraints3
prob1 = Problem(objective, constraints)
prob1.solve()
print(u.value)
step(range(30), u.value.T)  # `step` from matplotlib.pyplot / pylab
Example #31
0
A3 = vstack([A,identity(m)*sqrt(rohs)])
b3  = vstack([ones((n,1)),sqrt(rohs)*0.5*ones((m,1))])
p_ls_reg = np.linalg.lstsq(A3, b3)[0]
val_ls_reg = np.max(np.abs(log(A*matrix(p_ls_reg))))
print(p_ls_reg)
print(val_ls_reg)


#solution 4 chebyshev approximation
from cvxpy import Minimize, normInf, Variable, Problem, inv_pos

x = Variable(m, 1)
objective = Minimize(normInf(matrix(A)*x - ones((n,1))))
constraints = [x >= 0, x <= 1]
pro = Problem(objective, constraints)
result = pro.solve()
print(x.value)
val_ls_chev = np.max(np.abs(log(A*matrix(x.value))))
print(val_ls_chev)

#solution 5 cvxpy

from cvxpy import max
y = Variable(m, 1)
Am = matrix(A)
qq = [max(Am[i,:]*y, inv_pos(Am[i,:]*y)) for i in range(n)]
objective1 = Minimize(max(*qq))
constraints1 = [y >= 0, y <= 1]
pro1 = Problem(objective1, constraints1)
result1 = pro1.solve()
print(y.value)
Example #32
0
 def test_nonnegative_variable(self):
     x = NonNegative()
     p = Problem(Minimize(5+x),[x>=3])
     p.solve()
     self.assertAlmostEqual(p.value,8)
     self.assertAlmostEqual(x.value,3)
Example #33
0
def main():
    argparser = argparse.ArgumentParser(description="Solves for the optimal set of queries to attempt in order to "
                                                    "maximize coverage during a symbolic execution.")
    argparser.add_argument('explorationgraph', help="The serialized exploration graph on which to solve the best "
                                                    "scheduling strategy.")
    argparser.add_argument('-d', '--debug', help="Enables debugging output.", action="store_true")
    argparser.add_argument('-v', '--verbose', help="Enables verbose output. Debugging output includes verbose output.",
                           action='store_true')

    args = argparser.parse_args()

    debuglogging = args.debug
    verboselogging = args.verbose

    if debuglogging:
        loglevel = logging.DEBUG
    elif verboselogging:
        loglevel = logging.INFO
    else:
        loglevel = logging.WARNING

    FORMAT = '%(asctime)s - %(levelname)s : %(message)s'
    logging.basicConfig(stream=sys.stderr, level=loglevel, format=FORMAT, datefmt='%m/%d/%Y %I:%M:%S %p')

    graph = pickle.load(open(args.explorationgraph, 'rb'))

    root = graph.root_constraint

    exploration_timeout = 0.1
    maximize_code = True
    maximize_branch = False
    assert(maximize_code != maximize_branch)


    ilp_constraints = []

    # Create constraint variables
    path_constraints = {} # {Constraint.id : ilp_constraint_id}
    for path_constraint in all_path_constraints(root):
        path_constraints[path_constraint.id] = len(path_constraints)
    ilp_path_constraints = Int(len(path_constraints))
    for ilp_path_constraint in ilp_path_constraints:
        ilp_constraints.append(ilp_path_constraint <= 1)
        ilp_constraints.append(ilp_path_constraint >= 0)

    # Create branch coverage variables
    if maximize_branch:
        branch_coverage = {} # {(filename, arc_origin, arc_dest) : ilp_branch_id}
        for path_constraint in all_path_constraints(root):
            for filename, arcs in path_constraint.branches_covered.items():
                for arc in arcs:
                    arc_origin, arc_dest = arc
                    if (filename, arc_origin, arc_dest) not in branch_coverage:
                        branch_coverage[(filename, arc_origin, arc_dest)] = len(branch_coverage)
        ilp_branches = Int(len(branch_coverage))
        for ilp_branch in ilp_branches:
            ilp_constraints.append(ilp_branch <= 1)
            ilp_constraints.append(ilp_branch >= 0)

    # Create code coverage variables
    if maximize_code:
        code_coverage = {} # {(filename, line): ilp_code_id}
        for path_constraint in all_path_constraints(root):
            for filename, lines in path_constraint.lines_covered.items():
                for line in lines:
                    if (filename, line) not in code_coverage:
                        code_coverage[(filename, line)] = len(code_coverage)
        ilp_code_lines = Int(len(code_coverage))
        for ilp_code_line in ilp_code_lines:
            ilp_constraints.append(ilp_code_line <= 1)
            ilp_constraints.append(ilp_code_line >= 0)

    # Calculate response times
    response_times = {} # {Constraint.id : response_time}
    for path_constraint in all_path_constraints(root):
        if path_constraint.parent is not None and path_constraint.parent.inputs == path_constraint.inputs:
            response_times[path_constraint.id] = 0.
        else:
            response_times[path_constraint.id] = path_constraint.solving_time

    # Constraint 1: Total solve time
    total_solve_time = 0
    for path_constraint_id, ilp_path_constraint_id in path_constraints.items():
        total_solve_time += ilp_path_constraints[ilp_path_constraint_id]*response_times[path_constraint_id]
    ilp_constraints.append(total_solve_time <= exploration_timeout)

    # Constraint 2: Seed input
    ilp_constraints.append(ilp_path_constraints[0] == 1)

    # Constraint 3: Branch discovery
    for path_constraint in all_path_constraints(root):
        parent = path_constraint.parent
        if parent is not None:
            # The constraint is only in the schedule if its parent is in the schedule
            ilp_constraints.append(ilp_path_constraints[path_constraints[path_constraint.id]] <= ilp_path_constraints[path_constraints[parent.id]])
            if extract_model(parent) == extract_model(path_constraint):
                ilp_constraints.append(ilp_path_constraints[path_constraints[path_constraint.id]] == ilp_path_constraints[path_constraints[parent.id]])

    # Constraint 4: Coverage
    ## A path constraint is "covering" a branch if the input that discovers the path constraint covers the branch.
    if maximize_branch:
        for branch, ilp_branch_var in branch_coverage.items():
            covering_path_constraints = 0
            for path_constraint in all_path_constraints(root):
                filename, arc_origin, arc_dest = branch
                if (arc_origin, arc_dest) in path_constraint.branches_covered.get(filename, set()):
                    covering_path_constraints += ilp_path_constraints[path_constraints[path_constraint.id]]
            assert(type(covering_path_constraints) != int)
            ilp_constraints.append(ilp_branches[ilp_branch_var] <= covering_path_constraints)

    if maximize_code:
        for code_line, ilp_code_line_var in code_coverage.items():
            covering_path_constraints = 0
            for path_constraint in all_path_constraints(root):
                filename, line = code_line
                if line in path_constraint.lines_covered.get(filename, set()):
                    covering_path_constraints += ilp_path_constraints[path_constraints[path_constraint.id]]
            assert(type(covering_path_constraints) != int)
            ilp_constraints.append(ilp_code_lines[ilp_code_line_var] <= covering_path_constraints)

    if maximize_branch:
        objective = Maximize(sum_entries(ilp_branches)) # Maximize branch coverage
    if maximize_code:
        objective = Maximize(sum_entries(ilp_code_lines)) # Maximize code coverage
    problem = Problem(objective, ilp_constraints)

    problem.solve()

    # Correctness assertions
    ## All constraints are 0 or 1 indicator variables
    for ilp_path_constraint_var in path_constraints.values():
        assert(0 <= round(ilp_path_constraints[ilp_path_constraint_var].value) <= 1)

    ## All branch coverage variables are 0 or 1
    if maximize_branch:
        for ilp_branch_var in branch_coverage.values():
            assert(0 <= round(ilp_branches[ilp_branch_var].value) <= 1)

    ## All code coverage variables are 0 or 1
    if maximize_code:
        for ilp_code_line_var in code_coverage.values():
            assert(0 <= round(ilp_code_lines[ilp_code_line_var].value) <= 1)

    ## Initial values are in the schedule and they have a response time of 0
    assert(round(ilp_path_constraints[path_constraints[0]].value) == 1)
    assert(response_times[path_constraints[0]] == 0)

    ## Constraints are discovered if used
    for path_constraint_id, ilp_path_constraint_var in path_constraints.items():
        if round(ilp_path_constraints[ilp_path_constraint_var].value) == 1:
            for path_constraint in all_path_constraints(root):
                if path_constraint.id == path_constraint_id and path_constraint.parent is not None:
                    assert(round(ilp_path_constraints[path_constraints[path_constraint.parent.id]].value) == 1)

    ## If input is used in constraint, then all constraints from that input are used as well
    for path_constraint_id, ilp_path_constraint_var in path_constraints.items():
        if round(ilp_path_constraints[ilp_path_constraint_var].value) == 1:
            path_constraint_input = None
            for path_constraint in all_path_constraints(root):
                if path_constraint_id == path_constraint.id:
                    path_constraint_input = extract_model(path_constraint)
            assert(path_constraint_input is not None or path_constraint_id == 0)
            for path_constraint in all_path_constraints(root):
                if extract_model(path_constraint) == path_constraint_input:
                    assert(round(ilp_path_constraints[path_constraints[path_constraint.id]].value) == 1)

    ## Branch coverage comes from discovered constraints
    if maximize_branch:
        for branch, ilp_branch_var in branch_coverage.items():
            if round(ilp_branches[ilp_branch_var].value) == 1:
                filename, arc_origin, arc_dest = branch
                assert(any((arc_origin, arc_dest) in path_constraint.branches_covered.get(filename, set()) for path_constraint
                           in all_path_constraints(root) if round(ilp_path_constraints[path_constraints[path_constraint.id]].value) == 1))

    ## Code coverage comes from discovered constraints
    if maximize_code:
        for code_line, ilp_code_line_var in code_coverage.items():
            if round(ilp_code_lines[ilp_code_line_var].value) == 1:
                filename, line = code_line
                assert(any(line in path_constraint.lines_covered.get(filename, set()) for path_constraint
                           in all_path_constraints(root) if round(ilp_path_constraints[path_constraints[path_constraint.id]].value) == 1))

    optimal_constraint_ids = set() # {Constraint.id}
    optimal_inputs = {} # {inputs : solving_time}
    optimal_branches = {} # {(source_file: str) : {(origin_line: int, dest_line: int)}}
    optimal_lines = {} # {(source_file: str) : {line: int}}
    for path_constraint_id, var_index in path_constraints.items():
        if bool(round(ilp_path_constraints[var_index].value)):
            optimal_constraint_ids.add(path_constraint_id)

    for path_constraint in all_path_constraints(root):
        if path_constraint.id in optimal_constraint_ids and not extract_model(path_constraint) is None:
            optimal_inputs[extract_model(path_constraint)] = path_constraint.solving_time
            for file, branches in path_constraint.branches_covered.items():
                if file in optimal_branches:
                    optimal_branches[file] |= branches
                else:
                    optimal_branches[file] = set(branches)
            for file, lines in path_constraint.lines_covered.items():
                if file in optimal_lines:
                    optimal_lines[file] |= lines
                else:
                    optimal_lines[file] = set(lines)

    print("Solver CPU: {} seconds".format(sum(optimal_inputs.values())))
    print("Path coverage: {} paths".format(len(optimal_inputs)))
    print("Line coverage: {} lines".format(sum(len(lines) for file, lines in optimal_lines.items())))
    print("Branch coverage: {} branches".format(sum(len(branches) for file, branches in optimal_branches.items())))
Example #34
-1
def cvxpy_solve_qp(P, q, G=None, h=None, A=None, b=None, initvals=None,
                   solver=None):
    """
    Solve a Quadratic Program defined as:

        minimize
            (1/2) * x.T * P * x + q.T * x

        subject to
            G * x <= h
            A * x == b

    calling a given solver using the CVXPY <http://www.cvxpy.org/> modelling
    language.

    Parameters
    ----------
    P : array, shape=(n, n)
        Primal quadratic cost matrix.
    q : array, shape=(n,)
        Primal quadratic cost vector.
    G : array, shape=(m, n)
        Linear inequality constraint matrix.
    h : array, shape=(m,)
        Linear inequality constraint vector.
    A : array, shape=(meq, n), optional
        Linear equality constraint matrix.
    b : array, shape=(meq,), optional
        Linear equality constraint vector.
    initvals : array, shape=(n,), optional
        Warm-start guess vector (not used).
    solver : string, optional
        Solver name in ``cvxpy.installed_solvers()``.

    Returns
    -------
    x : array, shape=(n,)
        Solution to the QP, if found, otherwise ``None``.
    """
    if initvals is not None:
        print("CVXPY: note that warm-start values are ignored by wrapper")
    n = q.shape[0]
    x = Variable(n)
    P = Constant(P)  # see http://www.cvxpy.org/en/latest/faq/
    objective = Minimize(0.5 * quad_form(x, P) + q * x)
    constraints = []
    if G is not None:
        constraints.append(G * x <= h)
    if A is not None:
        constraints.append(A * x == b)
    prob = Problem(objective, constraints)
    prob.solve(solver=solver)
    x_opt = array(x.value).reshape((n,))
    return x_opt
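A tiny hypothetical example of calling cvxpy_solve_qp; it assumes the wrapper's own imports (numpy's array, cvxpy's Variable, Constant, Minimize, quad_form, Problem) are in scope.

import numpy as np

# Minimize (1/2) x'Px + q'x subject to x >= 0 (encoded as -x <= 0).
P = np.array([[4.0, 1.0], [1.0, 2.0]])
q = np.array([1.0, 1.0])
G = -np.eye(2)
h = np.zeros(2)

x_opt = cvxpy_solve_qp(P, q, G, h)
print(x_opt)  # the nonnegativity-constrained minimizer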