Example #1
def solve_conic_program(G, wrench, mu):
    # the optimization variables are the forces on each tip
    f = cp.Variable(9)
    # dummy = cp.Variable(1)

    # minimize the total squared contact force; the problem is essentially a constraint satisfaction (feasibility) check
    objective = cp.Minimize(cp.sum_squares(f))

    # two constraints:
    # 1. forces meet the external force
    constraint1 = G @ f == wrench
    # 2. forces inside the friction cone
    constraint21 = cp.SOC(mu * f[2], f[0:2])
    constraint22 = cp.SOC(mu * f[5], f[3:5])
    constraint23 = cp.SOC(mu * f[8], f[6:8])
    constraints = [constraint1, constraint21, constraint22, constraint23]
    # constraints = [cp.norm(dummy) <= -1]

    # the optimization problem
    prob = cp.Problem(objective, constraints)

    # solve the problem
    prob.solve()
    print("The optimal value is", prob.value)
    print("A solution f is", f.value)
    if f.value is None:
        return None
    return np.array(f.value).reshape((3, 3))
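A minimal usage sketch for solve_conic_program (the contact layout, friction coefficient and grasp map below are invented for illustration, and it assumes every contact normal is aligned with the world z-axis so that f[2], f[5] and f[8] are the normal force components):

import numpy as np

def skew(p):
    # Skew-symmetric matrix so that skew(p) @ f == np.cross(p, f)
    return np.array([[0.0, -p[2], p[1]],
                     [p[2], 0.0, -p[0]],
                     [-p[1], p[0], 0.0]])

# Three hypothetical fingertip contacts in the z = 0 plane, normals along +z.
contacts = [np.array([1.0, 0.0, 0.0]),
            np.array([-0.5, 0.866, 0.0]),
            np.array([-0.5, -0.866, 0.0])]

# Grasp map: each contact contributes a 6 x 3 block [I; skew(p)], so G is 6 x 9.
G = np.hstack([np.vstack([np.eye(3), skew(p)]) for p in contacts])

# Net wrench the contacts must provide, e.g. holding a 1 kg object against gravity.
wrench = np.array([0.0, 0.0, 9.81, 0.0, 0.0, 0.0])

forces = solve_conic_program(G, wrench, mu=0.5)  # 3 x 3 array, or None if infeasible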
Example #2
def constrained_interval_cvxpy(y, K, h, m, alpha=0.05):
    """
    Performs the same constrained interval optimization as constrained_interval
    but uses cvxpy as the backend optimizer.

    Dimension key:
    - n : number of smear bins
    - m : number of true bins

    Parameters:
        y     (np arr) : cholesky transformed data -- n x 1
        K     (np arr) : cholesky transformed matrix -- n x m
        h     (np arr) : functional for parameter transform -- m x 1
        m     (int)    : number of true bins
        alpha (float)  : coverage level

    Returns:
        (lb, ub) (tuple of floats) : lower and upper endpoints of the
                                     constrained interval
    """
    # find the slack factor
    opt_s2_ls = least_squares(fun=lambda x: y - K @ x,
                              x0=np.ones(m) * y.mean(),
                              bounds=([0] * m, [np.inf] * m))
    s2 = 2 * opt_s2_ls['cost']

    # find the constraint bound
    sq_err_constr = np.square(stats.norm(loc=0, scale=1).ppf(1 -
                                                             (alpha / 2))) + s2

    # define the variables to solve the problem
    m = K.shape[1]
    x_lb = cp.Variable(m)
    x_ub = cp.Variable(m)

    # define the constraints
    soc_constraints_lb = [
        cp.SOC(t=np.sqrt(sq_err_constr), X=y - K @ x_lb), x_lb >= 0
    ]
    soc_constraints_ub = [
        cp.SOC(t=np.sqrt(sq_err_constr), X=y - K @ x_ub), x_ub >= 0
    ]

    # define the problem
    prob_lb = cp.Problem(objective=cp.Minimize(h.T @ x_lb),
                         constraints=soc_constraints_lb)
    prob_ub = cp.Problem(objective=cp.Minimize(-h.T @ x_ub),
                         constraints=soc_constraints_ub)

    # solve the problem
    opt_lb = prob_lb.solve(solver='ECOS')
    opt_ub = prob_ub.solve(solver='ECOS')

    # check for convergence
    assert prob_lb.status == 'optimal'
    assert prob_ub.status == 'optimal'

    return opt_lb, -opt_ub
Example #3
def unconstrained_interval(K, h, y, alpha, solver='ECOS'):
    """
    Uses a combination of scipy.optimize and cvxpy to solve the
    optimization with no constraints.

    NOTE:
    - y and K should be the cholesky transformed versions of those variables

    Parameters:
        K      (np arr) : NxM smearing matrix - N # smear bins,
                          M # true bins
        h      (np arr) : Mx1 functional for original bins
        y      (np arr) : Nx1 observation vector
        alpha  (float)  : type I error threshold
        solver (str)    : cvxpy solver to use
    """
    # set up the variables
    n = K.shape[1]
    x_lb = cp.Variable(n)
    x_ub = cp.Variable(n)

    # find the slack factor
    opt_s2_ls = least_squares(fun=lambda z: y - K @ z,
                              x0=np.ones(n) * y.mean())
    s2 = 2 * opt_s2_ls['cost']

    sq_err_constr = np.square(stats.norm(loc=0, scale=1).ppf(1 -
                                                             (alpha / 2))) + s2

    # define the constraints
    soc_constraints_lb = [cp.SOC(t=np.sqrt(sq_err_constr), X=y - K @ x_lb)]
    soc_constraints_ub = [cp.SOC(t=np.sqrt(sq_err_constr), X=y - K @ x_ub)]

    # define the problem
    prob_lb = cp.Problem(objective=cp.Minimize(h.T @ x_lb),
                         constraints=soc_constraints_lb)
    prob_ub = cp.Problem(objective=cp.Minimize(-h.T @ x_ub),
                         constraints=soc_constraints_ub)

    # solve the problem
    opt_lb = prob_lb.solve(solver=solver)
    opt_ub = prob_ub.solve(solver=solver)

    # determine if optimal solution is found
    assert prob_lb.status == 'optimal'
    assert prob_ub.status == 'optimal'

    return opt_lb, -opt_ub
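A hypothetical usage sketch for unconstrained_interval on toy data (the smearing matrix, functional and noise level are made up; numpy, scipy.stats, scipy.optimize.least_squares and cvxpy are assumed to be imported as in the snippets above):

import numpy as np

rng = np.random.default_rng(0)
n_smear, n_true = 8, 5
K = np.abs(rng.normal(size=(n_smear, n_true)))         # toy smearing matrix
x_true = rng.uniform(1.0, 3.0, size=n_true)            # "true" bin contents
y = K @ x_true + rng.normal(scale=0.1, size=n_smear)   # noisy, smeared observations
h = np.ones(n_true)                                     # functional: total true count

lb, ub = unconstrained_interval(K, h, y, alpha=0.05)
print("95% interval for h^T x: [%.3f, %.3f]" % (lb, ub))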
Example #4

def support(M, mu, alpha, pss, dss, nss, s, beta=None, hand_normal=None):
    w = cp.Variable(6)
    fss = []
    cons = []
    sumFN = 0
    sumW = -w
    for i in range(len(pss)):
        fss.append(cp.Variable(3))
        fN = fss[-1].T @ nss[i]
        fT = fss[-1] - fN * nss[i]
        # normal sum
        if hand_normal is not None:
            sumFN += fN * math.exp(alpha * abs(dss[i]) + beta * (1 + np.dot(nss[i], hand_normal[i])))
        else:
            sumFN += fN * math.exp(alpha * abs(dss[i]))
        # friction cone
        cons.append(cp.SOC(fN * mu, fT))
        # sum of f
        f2w = np.concatenate((np.eye(3, 3, dtype=np.float64), cross(pss[i])))
        sumW += f2w @ fss[-1]
    # normal sum
    cons.append(sumFN <= 1)
    # sum of f
    cons.append(sumW == 0)
    # objective
    if M is not None:
        obj = cp.Maximize(w.T @ M @ s)
    else:
        obj = cp.Maximize(w.T @ s)
    prob = cp.Problem(obj, cons)
    prob.solve()
    return obj.value
Example #5
 def solve_example_mixed_integer_socp(solver) -> None:
     x = cp.Variable(2)
     y = cp.Variable()
     z = cp.Variable(integer=True)
     quadratic = cp.sum_squares(x + z)
     problem = cp.Problem(cp.Minimize(quadratic), [cp.SOC(y, x)])
     problem.solve(solver=solver)
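A hedged usage note: because z is declared integer, this is a mixed-integer SOCP and needs a solver with MISOCP support. A sketch, assuming such a solver is available in the local cvxpy installation:

# ECOS_BB ships with some cvxpy releases; MOSEK, GUROBI or SCIP also handle
# mixed-integer SOCPs if installed.
solve_example_mixed_integer_socp(cp.ECOS_BB)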
Example #6
 def _build_reg_constraints(self):
     self.reg_var = 0
     if len(self.sage_reg_variables) > 0:
         self.reg_var = cp.Variable(name='regvar')
         cvx_reg_vars = map(self._get_cvx_var_from_sage_var,
                            self.sage_reg_variables)
         self._cvx_socp_constraints.append(cp.SOC(self.reg_var,
                                             cp.hstack(cvx_reg_vars)))
Example #7
    def _build_segments_constraints(self):
        self.segment_vars = [cp.Variable(name=f's{i}') for i in
                             range(len(self._sage_linear_segments))]

        cvx_segments = map(lambda u: [self._get_cvx_expr_from_sage_expr(ui) for ui in u],
                           self._sage_linear_segments)
        self._cvx_socp_constraints += [
            cp.SOC(si,
                   cp.hstack(segment_i)) for si, segment_i in zip(self.segment_vars, cvx_segments)]
Example #8
 def test_soc(self):
     """Test with SOC.
     """
     x = Variable(2, complex=True)
     t = Variable()
     prob = Problem(cvx.Minimize(t), [cvx.SOC(t, x), x == 2j])
     result = prob.solve()
     self.assertAlmostEqual(result, 2*np.sqrt(2))
     self.assertItemsAlmostEqual(x.value, [2j, 2j])
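For reference on the asserted optimum: the constraint x == 2j fixes both complex entries of x to 2j, so ||x||_2 = sqrt(|2j|^2 + |2j|^2) = sqrt(8) = 2*sqrt(2). Since cvx.SOC(t, x) enforces ||x||_2 <= t, the smallest feasible t, and hence the minimal objective, is 2*sqrt(2).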
Example #9

def socp(y, D, n=784, eta=2.7):
    x = cp.Variable(n)
    c = np.zeros((n))
    constraints = [cp.SOC(c.T @ x + eta, D @ x - y)]

    # Form objective.
    obj = cp.Minimize(cp.norm(x, 1))

    # Form and solve problem.
    prob = cp.Problem(obj, constraints)
    prob.solve()

    return x.value
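A hypothetical usage sketch: with c fixed to zero the constraint reduces to ||D @ x - y||_2 <= eta, so the function above is the basis-pursuit-denoising form of sparse recovery (minimize ||x||_1 subject to a residual bound). All data below is made up:

import numpy as np

rng = np.random.default_rng(0)
n, k = 784, 200
D = rng.normal(size=(k, n)) / np.sqrt(k)              # toy sensing matrix
x_true = np.zeros(n)
x_true[rng.choice(n, size=10, replace=False)] = 1.0   # sparse ground truth
y = D @ x_true + 0.01 * rng.normal(size=k)            # noisy measurements

x_hat = socp(y, D, n=n, eta=2.7)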
Example #10
def min_sinr_req(min_sinr, h, w, w_all, P_other=0.0):
    # h, w are real
    # h is normalized w.r.t. the noise: h = h / sigma
    # h is the channel state of a single user: (M, 2), where M is the antenna count
    # w is the precoding vector (M, 2)
    # w_all is an iterable of all precoding vectors that can cause interference
    sig = cahb_r(h, w)
    intf_noise = [cahb_r(h, w_j)
                  for w_j in w_all] + [cahb_i(h, w_j) for w_j in w_all]
    intf_noise.append(1.0)  # normalized noise
    intf_noise.append(np.sqrt(P_other))
    t = np.sqrt(1 + 1.0 / min_sinr) * sig
    # rhs = np.sqrt(all_power + 1.0 + other)
    cons_soc = cp.SOC(
        t,  # the scalar
        cp.vstack(intf_noise)  # a vector, whose 2-norm is leq t
    )
    return cons_soc
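A brief note on the construction above (this reading is inferred from the helper names, so treat it as an assumption): writing s_j = h^H w_j, the requirement |s_i|^2 / (sum_{j != i} |s_j|^2 + 1 + P_other) >= min_sinr (noise already normalized to 1) is equivalent to (1 + 1/min_sinr) |s_i|^2 >= sum_j |s_j|^2 + 1 + P_other, where the sum now runs over all precoders. If a common phase rotation makes s_i real and nonnegative, taking square roots gives sqrt(1 + 1/min_sinr) * s_i >= ||[Re(s_j), Im(s_j) for all j, 1, sqrt(P_other)]||_2, which is exactly the cp.SOC constraint returned here.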
Example #11

def solve_cvxopt(cb_m, cb_N, H, T_i, i):
    n = cb_m + 1

    f = np.zeros(n)
    f[cb_m] = 1
    A = np.diag(np.append(np.ones(cb_m), np.zeros(1)))
    b = np.append(H.T[i], np.zeros(1))
    c = np.zeros(n)
    d = math.sqrt(T_i)
    F = np.append(np.delete(H, i, axis=1),
                  -np.ones(cb_N - 1).reshape(1, cb_N - 1),
                  axis=0).T
    g = np.zeros(cb_N - 1)

    # Define and solve the CVXPY problem.
    x = cp.Variable(n)
    # We use cp.SOC(t, x) to create the SOC constraint ||x||_2 <= t.
    soc_constraints = [cp.SOC(c.T @ x + d, A @ x - b)]
    prob = cp.Problem(cp.Minimize(f.T @ x), soc_constraints + [F @ x <= g])
    prob.solve()
    return x.value[:cb_m]
Example #12
def solve_socp(x, As, bs, cs, ds, F, g, h, verbose=False):
    """
    Solves an SOCP of the form:

    minimize(h^T x)
    subject to:
        ||A_i x + b_i||_2 <= c_i^T x + d_i    for all i
        F x == g

    Args:
        x       - cvx variable.
        As      - list of A_i numpy matrices.
        bs      - list of b_i numpy vectors.
        cs      - list of c_i numpy vectors.
        ds      - list of d_i scalars.
        F       - numpy matrix.
        g       - numpy vector.
        h       - numpy vector.
        verbose - whether to print verbose cvx output.

    Return:
        x - the optimal value as a numpy array, or None if the problem is
            infeasible or unbounded.
    """
    objective = cp.Minimize(h.T @ x)
    constraints = []
    for A, b, c, d in zip(As, bs, cs, ds):
        constraints.append(cp.SOC(c.T @ x + d, A @ x + b))
    constraints.append(F @ x == g)
    prob = cp.Problem(objective, constraints)
    prob.solve(verbose=verbose)

    if prob.status in ['infeasible', 'unbounded']:
        return None

    return x.value
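A hypothetical usage sketch for solve_socp with random data, mirroring the standard cvxpy SOCP example; d_i is chosen so that x0 is feasible, which keeps the generated problem solvable (all values are made up):

import numpy as np
import cvxpy as cp

np.random.seed(0)
m, n, n_i, p = 3, 10, 5, 4
x0 = np.random.randn(n)
As = [np.random.randn(n_i, n) for _ in range(m)]
bs = [np.random.randn(n_i) for _ in range(m)]
cs = [np.random.randn(n) for _ in range(m)]
ds = [np.linalg.norm(As[i] @ x0 + bs[i]) - cs[i] @ x0 for i in range(m)]  # x0 feasible
F = np.random.randn(p, n)
g = F @ x0
h = np.random.randn(n)

x = cp.Variable(n)
x_opt = solve_socp(x, As, bs, cs, ds, F, g, h)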
Example #13
    def fit(self, X, y):
        """
                Fit the model according to the given training data.
                Parameters
                ----------
                X : {array-like, sparse matrix} of shape (n_samples, n_features)
                    Training vector, where n_samples is the number of samples and
                    n_features is the number of features.
                y : array-like of shape (n_samples,)
                    Target vector relative to X.
        """
        dim = X.shape[1]
        beta = cp.Variable(dim)  # coefficients
        b = cp.Variable(1)  # intercept
        t = cp.Variable(1)
        if self.fit_intercept:
            log_likelihood = cp.sum(
                cp.multiply(y, X @ beta + b) - cp.logistic(X @ beta + b))
        else:
            log_likelihood = cp.sum(
                cp.multiply(y, X @ beta) - cp.logistic(X @ beta))

        cons = []
        if self.reg:
            cons.append(cp.SOC(t, beta))
        else:
            cons.append(t == 0)

        self.problem = cp.Problem(
            cp.Maximize(log_likelihood / dim - self.reg * t), cons)
        self.problem.solve(solver=cp.ECOS, abstol=1e-15, verbose=False)
        self.coef_ = beta.value
        if self.fit_intercept:
            self.intercept_ = b.value
        else:
            self.intercept_ = 0
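A short note on the regularizer above: cp.SOC(t, beta) enforces ||beta||_2 <= t, and since t only enters the objective through the term -self.reg * t, for positive self.reg the optimal t equals ||beta||_2. The formulation therefore applies an L2-norm penalty (not a squared-norm penalty) to the coefficients, and with self.reg equal to zero the t == 0 constraint simply removes the extra variable.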
Example #14

def consensuscforwardkl(meanlist, covariancelist, delta_r, prediction):

    num_posteriors = len(meanlist)
    n = len(meanlist[0])
    # Define optimization variables
    weights = cp.Variable(num_posteriors)
    gamma = cp.Variable(n)
    # v, s, vt = np.linalg.svd(covariancelist[0].values)

    s, v = np.linalg.eigh(covariancelist[0])

    s_list = []

    # s_list.append(s)

    # s, v = np.linalg.eigh(covariancelist[1].values)
    # s_list.append(s)

    # for i in np.arange(1,num_posteriors,1):
    for i in range(num_posteriors):
        s_temp = np.zeros(n)
        for j in range(n):
            s_temp[j] = np.inner(v[:, j], np.matmul(covariancelist[i], v[:,
                                                                         j]))
        s_list.append(s_temp)

    c = np.zeros((num_posteriors, n))

    for i in range(num_posteriors):
        for j in range(n):
            c[i, j] = np.inner(v[:, j], meanlist[i]) / s_list[i][j]

    sv_matrix = np.array(s_list)
    inverse_sv = 1.0 / sv_matrix

    #objective_fun = [cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights)),-1) for j in range(n)]

    obj = cp.sum([gamma[j] for j in range(n)])

    # Run optimization
    objective = cp.Minimize(obj)
    delta = delta_r * max([
        np.abs(c[-1, j] / inverse_sv[-1, j] - v[:, j].dot(prediction))
        for j in range(n)
    ])
    constraints = [weights >= 0, cp.sum(weights) == 1]
    for j in range(n):
        constraints.append(
            cp.sum(cp.multiply(c[:, j], weights)) <=
            (delta + v[:, j].dot(prediction)) *
            cp.sum(cp.multiply(inverse_sv[:, j], weights)))
        constraints.append(
            cp.sum(cp.multiply(c[:, j], weights)) >=
            (-delta + v[:, j].dot(prediction)) *
            cp.sum(cp.multiply(inverse_sv[:, j], weights)))
        #constraints.append(4 +  cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights))-gamma[j],2)<= cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights))+gamma[j],2))
        A = np.zeros((2, num_posteriors))
        B = np.zeros((2, n))
        B[1, j] = 1
        for i in range(num_posteriors):
            A[1, i] = inverse_sv[i, j]
        C = np.zeros(2)
        C[0] = 2
        constraints.append(
            cp.SOC(A[1, :] @ weights + B[1, :] @ gamma,
                   A @ weights - B @ gamma + C))

    prob = cp.Problem(objective, constraints)

    prob.solve()

    solution = weights.value
    #print(solution)
    #print(solution)

    final_sigma = scipy.linalg.inv(
        sum([
            solution[i] * scipy.linalg.inv(covariancelist[i])
            for i in range(num_posteriors)
        ]))
    final_mu = final_sigma.dot(
        sum([
            solution[i] *
            np.inner(scipy.linalg.inv(covariancelist[i]), meanlist[i])
            for i in range(num_posteriors)
        ]))

    return solution, final_mu, final_sigma
Example #15
import numpy as np
import cvxpy as cp

# Setting up the Data
m, n = 5, 20
n_i = p = 10
x0 = np.random.randn(n)
f = np.random.randn(n)
A, b, c, d = [], [], [], []

for i in range(m):
    A.append(np.random.rand(n_i, n))
    b.append(np.random.randn(n_i))
    c.append(np.random.randn(n))
    d.append(np.linalg.norm(A[i] @ x0 + b[i], 2) - c[i].T @ x0)
F = np.random.randn(p, n)
g = F @ x0

# Constructing the problem
x = cp.Variable(n)
soc_constraints = [
    cp.SOC(c[i].T @ x + d[i], A[i] @ x + b[i]) for i in range(m)
]

prob = cp.Problem(cp.Minimize(f.T @ x), soc_constraints + [F @ x == g])
prob.solve()

print("Found Optimal Solution = ", prob.value)
for i in range(m):
    print("SOC constraint %d dual variable solution" % i)
    print(soc_constraints[i].dual_value)
Example #16
    def disaggregate_chunk(self, test_mains_list):

        means_vector = self.means_vector
        pi_s_vector = self.pi_s_vector
        means_vector = self.means_vector
        transmat_vector = self.transmat_vector

        test_mains_list = pd.concat(test_mains_list, axis=0)
        expression = 0
        sigma = np.ones((len(test_mains_list)))  # The initial vector of Sigmas

        test_mains_big = test_mains_list.values.flatten().reshape((-1, 1))
        #print (len(test_mains))
        arr_of_results = []
        for test_block in range(
                int(math.ceil(len(test_mains_big) / self.time_period))):
            test_mains = test_mains_big[test_block *
                                        (self.time_period):(test_block + 1) *
                                        self.time_period]

            for epoch in range(6):
                if epoch % 2 == 1:
                    # The alternative minimization
                    usage = np.zeros((len(test_mains)))
                    for appliance_id in range(self.num_appliances):
                        s_v = s_[appliance_id]
                        s_v = np.where(s_v > 1, 1, s_v)
                        s_v = np.where(s_v < 0, 0, s_v)
                        app_usage = np.sum(s_v @ means_vector[appliance_id],
                                           axis=1)
                        usage += app_usage
                    sigma = test_mains.flatten() - usage.flatten()

                if epoch % 2 == 0:
                    constraints = []
                    cvx_state_vectors = []
                    cvx_variable_matrices = []
                    delta = cvx.Variable(shape=(len(test_mains), 1),
                                         name='delta_t')
                    for appliance_id in range(self.num_appliances):
                        state_vector = cvx.Variable(
                            shape=(len(test_mains), self.default_num_states),
                            name='state_vec-%s' % (appliance_id))
                        cvx_state_vectors.append(state_vector)

                        # Enforcing that their values are ranged
                        constraints += [cvx_state_vectors[appliance_id] >= 0]
                        constraints += [cvx_state_vectors[appliance_id] <= 1]

                        # Enforcing that sum of states equals 1
                        for t in range(len(test_mains)):  # 6c
                            constraints += [
                                cvx.sum(
                                    cvx_state_vectors[appliance_id][t]) == 1
                            ]

                        # Creating Variable matrices for every appliance

                        appliance_variable_matrix = []
                        for t in range(len(test_mains)):

                            matrix = cvx.Variable(
                                shape=(self.default_num_states,
                                       self.default_num_states),
                                name='variable_matrix-%s-%d' %
                                (appliance_id, t))
                            appliance_variable_matrix.append(matrix)

                        cvx_variable_matrices.append(appliance_variable_matrix)

                        # Enforcing that their values are ranged
                        for t in range(len(test_mains)):
                            constraints += [
                                cvx_variable_matrices[appliance_id][t] >= 0
                            ]
                            constraints += [
                                cvx_variable_matrices[appliance_id][t] <= 1
                            ]

                        # Constraint 6e
                        for t in range(0, len(test_mains)):  # 6e
                            for i in range(self.default_num_states):
                                constraints += [
                                    cvx.sum(cvx_variable_matrices[appliance_id]
                                            [t][i]) ==
                                    cvx_state_vectors[appliance_id][t][i]
                                ]
                        # Constraint 6d
                        for t in range(1, len(test_mains)):  # 6d
                            for i in range(self.default_num_states):
                                constraints += [
                                    cvx.sum(
                                        (cvx_variable_matrices[appliance_id][t]
                                         ).T[i]) ==
                                    cvx_state_vectors[appliance_id][t - 1][i]
                                ]
                    # Second order cone constraints
                    soc_constraints = []

                    total_observed_reading = np.zeros((test_mains.shape))

                    #print (len(cvx_state_vectors))
                    for appliance_id in range(self.num_appliances):
                        total_observed_reading += cvx_state_vectors[
                            appliance_id] @ means_vector[appliance_id]

                    for t in range(len(test_mains)):
                        soc_constraints += [
                            cvx.SOC(delta[t],
                                    test_mains[t] - total_observed_reading[t])
                        ]

                    constraints += soc_constraints
                    # initializing the Expression

                    expression = 0
                    for appliance_id in range(self.num_appliances):

                        # First loop is over appliances

                        variable_matrix = cvx_variable_matrices[appliance_id]

                        transmat = transmat_vector[appliance_id]
                        # Next loop is over different time-stamps

                        for matrix in variable_matrix:
                            expression -= cvx.sum(
                                cvx.multiply(matrix, np.log(transmat)))

                        one_hot_states = cvx_state_vectors[appliance_id]
                        pi = pi_s_vector[appliance_id]

                        # The expression involving start states
                        first_one_hot_states = one_hot_states[0]
                        #print ("Pis")
                        #print (first_one_hot_states.shape)
                        #print (pi.shape)
                        expression -= cvx.sum(
                            cvx.multiply(first_one_hot_states, np.log(pi)))

                    #print (delta.shape)
                    #print (sigma.shape)

                    for t in range(len(test_mains)):
                        #print (delta[i].shape)
                        #print (sigma[i].shape)
                        if sigma[t] > .8:
                            expression += .5 * (delta[t][0] / (sigma[t]**2))
                        else:
                            expression += .5 * (delta[t][0])

                    expression = cvx.Minimize(expression)
                    constraints += [delta >= 0]
                    u = time.time()
                    #print (sigma.shape)
                    prob = cvx.Problem(expression, constraints)
                    #prob.solve(solver=cvx.ECOS_BB)
                    prob.solve(cvx.SCS, verbose=False)
                    print(prob.value)
                    print(time.time() - u)
                    s_ = [i.value for i in cvx_state_vectors]
                    #                     print (delta.value)
                    #                     print (s_[0])
                    #                     print (np.sum(s_[0],axis=1))
                    #                     print (cvx_variable_matrices[0][0].value)
                    #                     print (cvx_variable_matrices[0][1].value)
                    print("Over Iteration")
                    print("\n\n")

            prediction_dict = {}

            for appliance_id in range(self.num_appliances):
                app_name = self.appliances[appliance_id]

                app_usage = np.sum(
                    s_[appliance_id] @ means_vector[appliance_id], axis=1)
                prediction_dict[app_name] = app_usage.flatten()
                #usage+=app_usage

            arr_of_results.append(
                pd.DataFrame(prediction_dict, dtype='float32'))

        return [pd.concat(arr_of_results, axis=0)]
Example #17
    def cvx_solver(self):
        '''
        This method solves the defined problem using the CVX solver.

        Returns:
            A dict holding the solve info

        See:
            https://www.cvxpy.org/examples/basic/socp.html

        Example:
            >>> prob.cvx_solver()
                {
                    'sol': solution,
                    'f_val': min value,
                    'iters': number of iterations,
                    'cpu_time': time needed for solving
                }
        '''
        if self.name is not None:
            _logger.info(f"Solving problem {self.name} using CVX ...")
        # Get the A matrix
        A, _, _, _ = self.extract_Q_blocks(self.Q_list[0])
        m, n = A.shape

        # SOC primal solution
        # x = cp.Variable((n, 1))
        x = cp.Variable(n)

        if self.format == 'sparse':
            b = np.reshape(np.array(self.b[0:-1].todense()), m)
        else:
            b = np.reshape(self.b[:-1], m)

        # Setting the equality constraint
        # soc_eq_constr = [A @ x == self.b[0:-1]]
        soc_eq_constr = [A @ x == b]

        del A

        # Setting the SOC inequality constraints
        # https://buildmedia.readthedocs.org/media/pdf/cvxpy/latest/cvxpy.pdf
        soc_ineq_constr = []
        for Q_mat in self.Q_list[1:]:
            Q_blk, q, f, d = self.extract_Q_blocks(Q_mat)
            if self.format == 'sparse':
                f = np.reshape(np.array(f.todense()), n)
                q = np.reshape(np.array(q.todense()), Q_blk.shape[0])
            else:
                f = np.reshape(f, n)
                q = np.reshape(q, Q_blk.shape[0])

            soc_ineq_constr.append(cp.SOC(f @ x + d, Q_blk @ x + q))

        del Q_blk
        del q
        del f
        del d

        # Define the whole problem
        prob = cp.Problem(cp.Minimize(self.c[0, 0:-1] @ x),
                          constraints=(soc_eq_constr + soc_ineq_constr))

        # Solve the problem
        start_time = datetime.now()
        prob.solve(verbose=True)
        cpu_time = datetime.now() - start_time

        solution = x.value
        iters = prob.solver_stats.num_iters

        return {
            'sol': solution,
            'f_val': prob.value,
            'iters': iters,
            'cpu_time': cpu_time
        }
Example #18
 def solve_example_socp(solver) -> None:
     x = cp.Variable(2)
     y = cp.Variable()
     quadratic = cp.sum_squares(x)
     problem = cp.Problem(cp.Minimize(quadratic), [cp.SOC(y, x)])
     problem.solve(solver=solver)
Example #19
                A_new_2[2,3*neighb_ind_var+2] = 1
                A_new = A_new_1-A_new_2
                b_new = np.zeros(3)

            A.append(A_new)
            b.append(b_new)
            c.append(np.zeros(3*n_points))
            d.append(dist[j])
    F = np.random.randn(p, 3*n_points)
    g = F@x0


    # Define and solve the CVXPY problem.
    x = cp.Variable(3*n_points)
    # We use cp.SOC(t, x) to create the SOC constraint ||x||_2 <= t.
    soc_constraints = [cp.SOC(c[i].T@x + d[i], A[i]@x + b[i]) for i in range(k_neigh*n_points)
    ]
    prob = cp.Problem(cp.Minimize(f.T@x), soc_constraints + [F@x == g])
    prob.solve()

    # Print result.
    print("The optimal value is", prob.value)
    print("A solution x is")
    print(x.value)

    x_val = np.reshape(x.value, [n_points,3])
    print('error:', check_error_2(cloud_new, x_val, N,D, n_fixed))
    # plot the points
    cloud_optimized = cloud_new
    cloud_optimized[n_fixed:n_fixed+n_points] = x_val
    xyz = np.dstack((cloud_optimized[:,1], cloud_optimized[:,2],
Example #20
def solve_dwd_socp(X, y, C=1.0, sample_weight=None, solver_kws={}):
    """
    Solves distance weighted discrimination optimization problem.

    Solves problem (2.7) from https://arxiv.org/pdf/1508.05913.pdf

    Parameters
    ----------
    X: (n_samples, n_features)

    y: (n_samples, )

    C: float
        Strictly positive tuning parameter.

    sample_weight: None, (n_samples, )
        Weights for samples.

    solver_kws: dict
        Keyword arguments passed to cp.Problem.solve.

    Returns
    ------
    beta: (n_features, )
        DWD normal vector.

    intercept: float
        DWD intercept.

    eta, d: float
        Optimization variables.

    problem: cp.Problem

    """

    if C < 0:
        raise ValueError("Penalty term must be positive; got (C={})".format(C))

    # TODO: add sample weights
    if sample_weight is not None:
        raise NotImplementedError

    X, y = check_X_y(X, y, accept_sparse='csr', dtype='numeric')

    # convert y to +/- 1
    y = pm1(y)

    n_samples, n_features = X.shape

    # problem data
    X = cp.Parameter(shape=X.shape, value=X)
    y = cp.Parameter(shape=y.shape, value=y)
    C = cp.Parameter(value=C, nonneg=True)

    # optimization variables
    beta = cp.Variable(shape=n_features)
    intercept = cp.Variable()
    eta = cp.Variable(shape=n_samples, nonneg=True)

    rho = cp.Variable(shape=n_samples)
    sigma = cp.Variable(shape=n_samples)

    # objective function
    # TODO: check this is correct way to do sample weighting
    if sample_weight is None:
        v = np.ones(n_samples)
    else:
        v = np.array(sample_weight).reshape(-1)
        assert len(v) == n_samples
    objective = v.T @ (rho + sigma + C * eta)

    # setup constraints
    # TODO: do we need explicit SOCP constraints?
    Y_tilde = cp.diag(y)  # TODO: make sparse
    constraints = [
        rho - sigma == Y_tilde @ X @ beta + intercept * y + eta,
        cp.SOC(cp.Parameter(value=1), beta)
    ]  # ||beta||_2 <= 1

    # rho^2 - sigma^2 >= 1
    constraints.extend(
        [cp.SOC(rho[i], cp.vstack([sigma[i], 1])) for i in range(n_samples)])

    # solve problem
    problem = cp.Problem(cp.Minimize(objective), constraints=constraints)

    problem.solve(**solver_kws)

    # d = rho - sigma
    # rho = (1/d + d)/2, sigma = (1/d - d)/2
    d = rho.value - sigma.value

    return beta.value, intercept.value, eta.value, d, problem
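A hypothetical usage sketch on two Gaussian blobs (the data is made up, and it assumes solve_dwd_socp and its helpers such as pm1 and check_X_y are importable as above):

import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(25, 2) + 2.0, rng.randn(25, 2) - 2.0])
y = np.array([1] * 25 + [0] * 25)

beta, intercept, eta, d, problem = solve_dwd_socp(X, y, C=1.0)
print("normal vector:", beta)
print("intercept:", intercept)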
Example #21
    def weight_updates(self, real_data, real_labels, vis=True):
        # Module for updating weights

        if self.weight_update_type == 'discrete':
            m = self.num_datapoints

            # Populating discriminator outputs
            disc_arr = torch.zeros(m, )
            with torch.no_grad():
                for data in self.dataloader:
                    inp, labels, indices = data
                    inp = inp.to(self.device)

                    disc_out = self.netD(inp, real_labels)
                    disc_out = disc_out.view(-1)
                    disc_out = disc_out.cpu()
                    disc_arr[indices] = disc_out

            if self.config.disc_momentum > 0:
                disc_arr = disc_arr * (
                    1 - self.config.disc_momentum
                ) + self.config.disc_momentum * self.disc_vector.cpu()
            disc_arr = disc_arr.detach().numpy()

            # Solving convex optimization problem
            # Note: we are using normalized weights
            weight_arr = cp.Variable((self.num_datapoints, ))
            ones = np.ones(m)

            soc_const = cp.Constant(np.sqrt(2 * self.config.rho * m))
            constraints = [
                cp.SOC(soc_const, (weight_arr - ones)),
                cp.matmul(weight_arr.T, ones) == m, weight_arr >= 0
            ]
            objective = cp.Minimize(cp.matmul(weight_arr.T, disc_arr))

            prob = cp.Problem(objective, constraints)
            result = prob.solve(solver='SCS')

            weight_res = weight_arr.value
            weight_res = torch.from_numpy(weight_res)

            self.weight_vector.copy_(weight_res)

        else:
            self.optimizerW.zero_grad()
            real_weights = self.netW(real_data, real_labels) + self.eps
            real_weights = (real_weights /
                            real_weights.sum()) * self.config.batchSize
            real_logits = self.netD(real_data, real_labels)

            # Chi-squared
            soft_constraint = 100 * F.relu(
                torch.mean(0.5 * ((real_weights - 1)**2)) - self.config.rho)

            # Total variation
            # soft_constraint = 1000 * F.relu(torch.mean(0.5 * torch.abs(real_weights - 1)) - self.config.rho)

            loss_weights = self.weight_loss_fn(real_logits,
                                               real_weights) + soft_constraint
            loss_weights.backward()
            self.optimizerW.step()

            if vis:
                img_path = osp.join(self.config.logdir, 'samples')
                Path(img_path).mkdir(parents=True, exist_ok=True)
                real_weights_sorted, indices = torch.sort(
                    real_weights.view(-1))
                print('Weights')
                print(real_weights_sorted)
                print('Soft constraint: {}'.format(soft_constraint.item()))
                vutils.save_image(real_data[indices, ::] * 0.5 + 0.5,
                                  '{}/real_vis.png'.format(img_path))
                torch.save(real_weights_sorted,
                           '{}/weights.pth'.format(img_path))
Example #22
    def disaggregate_chunk(self, test_mains_list):

        means_vector = self.means_vector
        pi_s_vector = self.pi_s_vector
        means_vector = self.means_vector
        transmat_vector = self.transmat_vector

        test_mains_list = pd.concat(test_mains_list, axis=0)
        expression = 0
        sigma = np.ones((len(test_mains_list)))  # The initial vector of Sigmas

        test_mains_big = test_mains_list.values.flatten().reshape((-1, 1))
        #print (len(test_mains))
        arr_of_results = []

        for test_block in range(
                int(math.ceil(len(test_mains_big) / self.time_period))):

            err = 1e100
            print("BLock ", test_block)
            test_mains = test_mains_big[test_block *
                                        (self.time_period):(test_block + 1) *
                                        self.time_period]

            #initialization

            lambdas = []

            for appliance_id in range(self.num_appliances):
                #for t in range(len(test_mains)):
                lambdas.append(.05)

            for epoch in range(6):
                print("Epoch ", epoch)
                #                 print (test_mains.shape)
                #                 print (len(lambdas))
                if epoch % 2 == 1:
                    # The alternative minimization

                    usage = np.zeros((len(test_mains)))

                    for appliance_id in range(self.num_appliances):

                        app_usage = np.sum(
                            s_[appliance_id] @ means_vector[appliance_id],
                            axis=1)

                        usage += app_usage
                        lambda_value = 1 + (
                            np.sum(app_usage) - self.signal_aggregates[
                                self.appliances[appliance_id]])**(2)
#                         print (self.appliances[appliance_id])
#                         #print ("Lambda = ",lambda_value)
#                         print ("Total usage ",np.sum(app_usage))
#                         print ("Signal Aggregate ",self.signal_aggregates[self.appliances[appliance_id]])
#                         print ("Gamma ",gamma.value[appliance_id])
#                         lambda_value = 1/lambda_value
#lambdas.append(lambda_value)

#print ("Lam")

#actual_usage = train_appliances[appliance_id][1].values.flatten()
# plt.figure(figsize=(20,8))
# plt.plot(np.where(app_usage.flatten()>0,app_usage.flatten(),0),label='pred')
# plt.plot(train_appliances[appliance_id][1].values,label='truth')
# plt.plot(train_main.flatten(),label='Input')
# plt.legend()
# plt.title(train_appliances[appliance_id][0])
# plt.savefig(str(epoch)+str(appliance_id)+'.png')
# print (train_appliances[appliance_id][0])

# print ("RMSE")
# print (mean_squared_error(actual_usage.flatten(),app_usage.flatten())**(.5))

# print("MAE")
# print (mean_absolute_error(actual_usage.flatten(),app_usage.flatten()))
# print (sigma.shape)
                    sigma = test_mains.flatten() - usage.flatten()
                    print("Sigma: ", np.mean(sigma))

                    # print (sigma.shape)
    #                sigma = np.where(sigma>1,sigma,1)
    #                usage = np.where(usage.flatten()>0,usage.flatten(),0)

                if epoch % 2 == 0:

                    constraints = []
                    cvx_state_vectors = []
                    cvx_variable_matrices = []
                    delta = cvx.Variable(shape=(len(test_mains), 1),
                                         name='delta_t')
                    gamma = cvx.Variable(shape=(self.num_appliances, 1),
                                         name='gamma')
                    #                     print ("delta and gamma")
                    #                     print (delta.shape)
                    #                     print (gamma.shape)
                    for appliance_id in range(self.num_appliances):
                        state_vector = cvx.Variable(
                            shape=(len(test_mains), self.default_num_states),
                            name='state_vec-%s' % (appliance_id))

                        cvx_state_vectors.append(state_vector)

                        # Enforcing that their values are ranged
                        constraints += [cvx_state_vectors[appliance_id] >= 0]
                        constraints += [cvx_state_vectors[appliance_id] <= 1]

                        # Enforcing that sum of states equals 1
                        for t in range(len(test_mains)):  # 6c
                            constraints += [
                                cvx.sum(
                                    cvx_state_vectors[appliance_id][t]) == 1
                            ]

                        # Creating Variable matrices for every appliance

                        appliance_variable_matrix = []
                        for t in range(len(test_mains)):

                            matrix = cvx.Variable(
                                shape=(self.default_num_states,
                                       self.default_num_states),
                                name='variable_matrix-%s-%d' %
                                (appliance_id, t))
                            appliance_variable_matrix.append(matrix)

                        cvx_variable_matrices.append(appliance_variable_matrix)

                        # Enforcing that their values are ranged
                        for t in range(len(test_mains)):
                            constraints += [
                                cvx_variable_matrices[appliance_id][t] >= 0
                            ]
                            constraints += [
                                cvx_variable_matrices[appliance_id][t] <= 1
                            ]

                        # Constraint 6e
                        for t in range(0, len(test_mains)):  # 6e
                            for i in range(self.default_num_states):
                                constraints += [
                                    cvx.sum(cvx_variable_matrices[appliance_id]
                                            [t][i]) ==
                                    cvx_state_vectors[appliance_id][t][i]
                                ]
                        # Constraint 6d
                        for t in range(1, len(test_mains)):  # 6d
                            for i in range(self.default_num_states):
                                constraints += [
                                    cvx.sum(
                                        (cvx_variable_matrices[appliance_id][t]
                                         ).T[i]) ==
                                    cvx_state_vectors[appliance_id][t - 1][i]
                                ]

                    # Second order cone constraints
                    soc_constraints = []

                    total_observed_reading = np.zeros((test_mains.shape))

                    #print (len(cvx_state_vectors))
                    for appliance_id in range(self.num_appliances):

                        #print (cvx_state_vectors[appliance_id].shape)
                        #print (cvx_state_vectors[appliance_id][t][j])
                        #print (means_vector[appliance_id][j])

                        total_observed_reading += cvx_state_vectors[
                            appliance_id] @ means_vector[appliance_id]
                        #soc_constraints+=[cvx.SOC( delta[t], train_main[t] - total_observed_reading)]

    #                soc_constraints+=[ cvx.norm(train_main[t] - total_observed_reading) <=  delta[t]]
    #                    soc_constraints+=[cvx.SOC( delta[t], train_main[t] - total_observed_reading)]
    # print (delta.shape)
    # print (total_observed_reading.shape)
    # print (train_main.shape)
                    for t in range(len(test_mains)):
                        soc_constraints += [
                            cvx.SOC(delta[t],
                                    test_mains[t] - total_observed_reading[t])
                        ]

                    # total_observed_reading = np.zeros((train_main.shape))

                    # for appliance_id in range(len(train_appliances)):
                    #     print(appliance_id)
                    #     print (len(cvx_state_vectors))
                    #     #print
                    #     total_observed_reading+=cvx_state_vectors[appliance_id]@means_vector[appliance_id]

                    # for t in range(len(train_main)):
                    #     #soc_constraints+=[cvx.SOC( delta[t], train_main[t] - total_observed_reading[t])]
                    #     soc_constraints+=[ cvx.norm(train_main[t] - total_observed_reading[t])**2 <=  delta[t]]

                    # print ("After SOC")
                    # print (train_main[t].shape)
                    # print (total_observed_reading[t].shape)
                    # print (delta[t].shape)
                    #print (cvx.sum(cvx.multiply(cvx_state_vectors[appliance_id][t][j],means_vector[appliance_id][j])).shape)
                    #print ('After SOC')
                    #print (train_main[t].shape,total_observed_reading.shape, delta )
                    # observed_usage = np.zeros((len(train_main),1))

                    # for t in range(len(train_main)):
                    #     #observed_usage[t]
                    #     for appliance_id in range(len(train_appliances)):
                    #     observed_usage+=cvx_state_vectors[appliance_id] @ means_vector[appliance_id]
                    # for t in range(len(train_main)):
                    #     soc_constraints+=[cvx.SOC(delta[t], train_main[t] - observed_usage)]

                    for appliance_id, appliance_name in enumerate(
                            self.appliances):
                        appliance_usage = cvx_state_vectors[
                            appliance_id] @ means_vector[appliance_id]
                        #                         print (appliance_usage.shape)
                        #                         print (cvx_state_vectors[appliance_id].shape)
                        #                         print (means_vector[appliance_id].shape)
                        total_usage = cvx.sum(appliance_usage, axis=0)

                        soc_constraints += [
                            cvx.SOC(
                                gamma[appliance_id], total_usage -
                                self.signal_aggregates[appliance_name])
                        ]
                    constraints += soc_constraints
                    #                     print ("SOC")
                    #                     print (gamma[appliance_id].shape)
                    #                     print (appliance_usage.shape)
                    #                     print (self.signal_aggregates[appliance_name].shape)
                    #                     print (total_usage.shape)
                    #print ()
                    # initializing the Expression

                    expression = 0

                    for appliance_id in range(self.num_appliances):

                        # First loop is over appliances

                        variable_matrix = cvx_variable_matrices[appliance_id]

                        transmat = transmat_vector[appliance_id]
                        # Next loop is over different time-stamps

                        for matrix in variable_matrix:
                            expression -= cvx.sum(
                                cvx.multiply(matrix, np.log(transmat)))

                        one_hot_states = cvx_state_vectors[appliance_id]
                        pi = pi_s_vector[appliance_id]

                        # The expression involving start states
                        first_one_hot_states = one_hot_states[0]
                        #print ("Pis")
                        #print (first_one_hot_states.shape)
                        #print (pi.shape)
                        expression -= cvx.sum(
                            cvx.multiply(first_one_hot_states, np.log(pi)))

                    #print (delta.shape)
                    #print (sigma.shape)

                    for t in range(len(test_mains)):
                        #print (delta[i].shape)
                        #print (sigma[i].shape)
                        #if sigma[t]>.8:
                        expression += .5 * (delta[t][0] / (sigma[t]**2))
                    #else:
                    #    expression+=.5 * (delta[t][0])

                    for appliance_id in range(self.num_appliances):
                        expression += gamma[appliance_id] * lambdas[
                            appliance_id]

                    expression = cvx.Minimize(expression)
                    constraints += [delta >= 0]
                    constraints += [gamma >= 0]
                    u = time.time()
                    #print (sigma.shape)
                    prob = cvx.Problem(expression, constraints)
                    #prob.solve(solver=cvx.ECOS_BB)
                    prob.solve(cvx.SCS, verbose=False)
                    #print (prob.value)
                    print("Time : ", time.time() - u)
                    s_ = [i.value for i in cvx_state_vectors]
                    if prob.value < err:
                        best_s = s_ + []
                        err = prob.value
                        print("Least error is ", err)
                    # print (delta.value)
                    # print (s_[0])
                    # print (np.sum(s_[0],axis=1))
                    # print (cvx_variable_matrices[0][0].value)
                    # print (cvx_variable_matrices[0][1].value)
                    # print ("Over Iteration")

            prediction_dict = {}

            for appliance_id in range(self.num_appliances):
                app_name = self.appliances[appliance_id]

                app_usage = np.sum(
                    best_s[appliance_id] @ means_vector[appliance_id], axis=1)
                prediction_dict[app_name] = app_usage.flatten()
                #usage+=app_usage

            arr_of_results.append(
                pd.DataFrame(prediction_dict, dtype='float32'))

        return [pd.concat(arr_of_results, axis=0)]
Example #23
        sumParentQ += (Qij[parent][i] - Iij2[parent][i] * X[parent][i])
    for child in childNode[i]:
        sumChildP += (P[i][child] - Iij2[i][child] * R[i][child])
        sumChildQ += (Q[i][child] - Iij2[i][child] * X[i][child])
    equal_constraints += [sumParentP + P[i] == sumChildP,\
                        sumParentQ + Q[i] == sumChildQ]
equal_constraints += [Iij2 <= Imax, Iij2 >= 0]

# cone-related constraints
cones = []
for line in lines:
    i = line[1] - 1
    j = line[2] - 1
    Xcone = cp.vstack([2 * Pij[i][j], 2 * Qij[i][j], Iij2[i][j] - U2[i]])
    t = Iij2[i][j] + U2[i]
    cones += [cp.SOC(t, Xcone)]
obj = 0
for line in lines:
    i = line[1] - 1
    j = line[2] - 1
    obj += R[i][j] * Iij2[i][j]

prob = cp.Problem(cp.Minimize(obj),
                  equal_constraints + inequal_constraints + cones)
prob.solve(solver=cp.CVXOPT)
print(prob.status)
print(prob.value)
# generate the post-optimization data

# analyze the post-optimization data
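A short note on the cone constraints built above (the power-flow interpretation is inferred from the variable names): squaring ||(2*Pij, 2*Qij, Iij2 - U2)||_2 <= Iij2 + U2 gives 4*Pij^2 + 4*Qij^2 + (Iij2 - U2)^2 <= (Iij2 + U2)^2, and because (Iij2 + U2)^2 - (Iij2 - U2)^2 = 4*Iij2*U2 this reduces to Pij^2 + Qij^2 <= Iij2 * U2, the standard second-order-cone relaxation of the DistFlow relation between branch power flow, squared current and squared voltage.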
Example #24
    def best_response(self, i_ego, state, trajectories):
        """Based on current trajectories, compute the best-response (BR) of player i_ego.

        This is done by solving an optimization problem over the trajectory
            p_ego[0], p_ego[1], ..., p_ego[N]
        for drone i_ego maximizing the progress along the track while also trying to block the opponent. Here t is the tangent vector that points along the track and N is the horizon length. The resulting optimization is implemented as follows:

        minimize -t^T p_ego[N]

        subject to

          - dynamical constraints
                ||p_ego[k+1] - p_ego[k]|| <= v_max*dt

          - stay-within-track constraints
                |n^T (p[k] - c)| <= width,
                |v^T (p[k] - c)| <= height,
            where v is the track vertical (t x n, where x here represents the cross product) and c is the track center

          - non-collision constraints
                ||p_ego[k] - p_opp[k]|| >= r_coll * 2

        The progress along the track is approximated by the progress along the tangent at the last point of the trajectory, i.e., maximize t^T p_ego[N]. The non-collision constraints are non-convex and are linearized here. Instead of requiring the ego drone to stay outside a circle, the drone is constrained to be in a half-plane tangential to that circle. In addition to optimizing track progress, the penalty of violating the safety radius is accounted for.

        If the blocking term defined in the trajectory parameters is non-zero, we add an additional term to the objective function that incentivizes the drones to slightly adjust their trajectories to block the opponent. Now the objective function is 
        
            -t^T p_ego[N] + sum_k( gamma^k p_rel[k]^T n n^T p_rel[k] )

        where the sum here is over the full trajectory (1, ..., k, ..., N), gamma is the blocking coefficient (a term that is positive when the opponent is behind the ego agent and zero otherwise), and p_rel = p_ego - p_opp is the relative pose vector between the two drones. This is just a heuristic for "blocking" behavior that is only activated when the ego drone is in front of the opponent. 

        :param i_ego: The drone index of the ego drone.
        :param state: The current state (positions) of the two drones.
        :param trajectories: The current trajectories
        :return: Optimized trajectory of player i_ego
        """
        i_opp = (i_ego + 1) % 2
        v_ego = self.drone_params[i_ego]["v_max"]
        r_coll_ego = self.drone_params[i_ego]["r_coll"]
        r_coll_opp = self.drone_params[i_opp]["r_coll"]
        r_safe_ego = self.drone_params[i_ego]["r_safe"]
        r_safe_opp = self.drone_params[i_opp]["r_safe"]
        d_coll = r_coll_ego + r_coll_opp
        d_safe = r_safe_ego + r_safe_opp
        p = cp.Variable(shape=(self.n_steps, 3))

        # === Dynamical Constraints ===
        # ||p_0 - p[0]|| <= v*dt
        init_dyn_constraint = cp.SOC(cp.Constant(v_ego * self.dt),
                                     cp.Constant(state[i_ego, :]) - p[0, :])
        # ||p[k+1] - p[k]|| <= v*dt
        dyn_constraints = [init_dyn_constraint] + [
            cp.SOC(cp.Constant(v_ego * self.dt), p[k + 1, :] - p[k, :])
            for k in range(self.n_steps - 1)
        ]

        # === Track Constraints ===
        track_constraints = []
        track_obj = cp.Constant(0)
        track_objective_exp = 0.5  # exponentially decreasing weight
        t = np.zeros((self.n_steps, 3))
        n = np.zeros((self.n_steps, 3))
        for k in range(self.n_steps):
            # query track indices at ego position
            idx, c, t[k, :], n[
                k, :], width, height = self.track.track_frame_at(
                    trajectories[i_ego][k, :])
            # horizontal track width constraints
            track_constraints.append(
                n[k, :].T @ p[k, :] - np.dot(n[k, :], c) <= width - r_coll_ego)
            track_constraints.append(
                n[k, :].T @ p[k, :] -
                np.dot(n[k, :], c) >= -(width - r_coll_ego))
            # vertical track height constraints
            v = np.cross(
                t[k, :],
                n[k, :])  # the vertical direction component of the track
            track_constraints.append(
                v.T @ p[k, :] - v.dot(c) <= height - r_coll_ego)
            track_constraints.append(
                v.T @ p[k, :] - v.dot(c) >= -(height - r_coll_ego))
            # track constraints objective
            track_obj += (track_objective_exp**k) * (
                cp.pos(n[k, :].T @ p[k, :] - np.dot(n[k, :], c) -
                       (width - r_coll_ego)) +
                cp.pos(-(n[k, :].T @ p[k, :] - np.dot(n[k, :], c) +
                         (width - r_coll_ego))))

        # === Non-Collision Constraints ===
        nc_constraints = []
        nc_obj = cp.Constant(0)
        nc_relax_obj = cp.Constant(0)
        non_collision_objective_exp = 0.5  # exponentially decreasing weight
        for k in range(self.n_steps):
            p_opp = trajectories[i_opp][k, :]
            p_ego = trajectories[i_ego][k, :]
            # Compute beta, the normal direction vector pointing from the ego's drone position to the opponent's
            beta = p_opp - p_ego
            if np.linalg.norm(beta) >= 1e-6:
                # Only normalize if norm is large enough
                beta /= np.linalg.norm(beta)
            #     n.T * (p_opp - p_ego) >= d_coll
            nc_constraints.append(beta.dot(p_opp) - beta.T @ p[k, :] >= d_coll)
            # For normal non-collision objective use safety distance
            nc_obj += (non_collision_objective_exp**
                       k) * cp.pos(d_safe -
                                   (beta.dot(p_opp) - beta.T @ p[k, :]))
            # For relaxed non-collision objective use collision distance
            nc_relax_obj += (non_collision_objective_exp**
                             k) * cp.pos(d_coll -
                                         (beta.dot(p_opp) - beta.T @ p[k, :]))

        # === Blocking Heuristic Objective ===
        blocking_obj = cp.Constant(0)
        blocking_objective_exp = 0.5  # exponentially decreasing weight
        leader_term = np.dot(
            (trajectories[i_ego][0, :] - trajectories[i_opp][0, :]), t[0, :])
        if (self.blocking & (leader_term > 0.0)):
            for k in range(self.n_steps):
                p_opp = trajectories[i_opp][k, :]
                # scale factor for leading robot
                p_rel = trajectories[i_ego][k, :] - p_opp
                leader_term = np.dot(p_rel, t[k, :])
                gamma = 0.0
                if (leader_term > 0):
                    gamma = 1.0 / (leader_term * leader_term) / (k + 1)
                else:
                    gamma = 0.0
                # add blocking cost function
                blocking_obj += gamma * blocking_objective_exp**k * cp.quad_form(
                    p[k, :] - p_opp, np.outer(n[k, :], n[k, :]))

        # === "Win the Race" Objective ===
        # Take the tangent t at the last trajectory point
        # This serves as an approximation to the total track progress
        obj = -t[-1, :].T @ p[-1, :]

        # create the problem in cvxpy and solve it
        prob = cp.Problem(
            cp.Minimize(obj + self.nc_weight * nc_obj +
                        self.blocking_weight * blocking_obj),
            dyn_constraints + track_constraints + nc_constraints)

        # try to solve proposed problem
        trajectory_result = np.zeros((self.n_steps, 3))
        try:
            prob.solve()
            # relax track constraints if problem is infeasible
            if np.isinf(prob.value):
                print("WARN: relaxing track constraints")
                # If the problem is not feasible, relax track constraints
                # Assert it is indeed an infeasible problem and not unbounded (in which case value is -inf).
                # (The dynamical constraints keep the problem bounded.)
                assert prob.value >= 0.0

                # Solve relaxed problem (track constraint -> track objective)
                relaxed_prob = cp.Problem(
                    cp.Minimize(obj + self.nc_weight * nc_obj +
                                self.track_relax_weight * track_obj),
                    dyn_constraints + nc_constraints)
                relaxed_prob.solve()

                # relax non-collision constraints if problem is still  infeasible
                if np.isinf(relaxed_prob.value):
                    print("WARN: relaxing non collision constraints")
                    # If the problem is still infeasible, relax non-collision constraints
                    # Again, assert it is indeed an infeasible problem and not unbounded (in which case value is -inf).
                    # (The dynamical constraints keep the problem bounded.)
                    assert relaxed_prob.value >= 0.0

                    # Solve relaxed problem (non-collision constraint -> non-collision objective)
                    relaxed_prob = cp.Problem(
                        cp.Minimize(obj + self.nc_weight * nc_obj +
                                    self.nc_relax_weight * nc_relax_obj),
                        dyn_constraints + track_constraints)
                    relaxed_prob.solve()

                    assert not np.isinf(relaxed_prob.value)
            trajectory_result = p.value
        except:  # if cvxpy fails, just return the initialized trajectory to do something
            print(
                "WARN: cvxpy failre: resorting to initial trajectory (no collision constraints!)"
            )
            trajectory_result = trajectories[i_ego]
        return trajectory_result
Example #25
m = 3
n = 10
p = 5
n_i = 5
np.random.seed(2)
f = np.random.randn(n)
A = []
b = []
c = []
d = []
x0 = np.random.randn(n)
for i in range(m):
    A.append(np.random.randn(n_i, n))
    b.append(np.random.randn(n_i))
    c.append(np.random.randn(n))
    d.append(np.linalg.norm(A[i] @ x0 + b[i], 2) - c[i].T @ x0)
F = np.random.randn(p, n)
g = F @ x0

# Define and solve the cvxpy problem
x = cp.Variable(n)

# We use cp.SOC(t,x) to create the SOC constraint
soc_constraint = [cp.SOC(c[i].T @ x + d[i], A[i] @ x + b[i]) for i in range(m)]
prob = cp.Problem(cp.Minimize(f.T @ x), soc_constraint + [F @ x == g])
prob.solve()
print("The optimal value is", prob.value)
print("A solution x is")
print(x.value)
for i in range(m):
    print("SOC constraint %i dual variable solution" % i)
    print(soc_constraint[i].dual_value)