Example #1
def SVM_SVM(absDataOrigin, absLabels, cmpDataOrigin, cmpLabels, absWeight,
            lamda):
    # This function trains an SVM on both absolute labels and comparison labels.
    # Parameters:
    # ------------
    # absDataOrigin : N by d numpy matrix, where N is the number of absolute-label samples and d is the data dimension
    # absLabels : (N,) numpy array; +1 denotes a positive label, -1 a negative label
    # cmpDataOrigin : N by d numpy matrix, where N is the number of comparison-label samples and d is the data dimension
    # cmpLabels : (N,) numpy array; +1 denotes a positive label, -1 a negative label
    # absWeight : weight on the absolute-label data; (1 - absWeight) is the weight on the comparison data
    # lamda : weight on the L1 penalty; a larger lamda gives more zeros in beta

    # Return:
    # ------------
    # beta : the learned linear SVM coefficients
    # const : the learned intercept (global constant)

    cmpWeight = 1.0 - absWeight
    absN, d = np.shape(absDataOrigin)
    cmpN, _ = np.shape(cmpDataOrigin)
    beta = cp.Variable(d)
    const = cp.Variable(1)
    objective = absWeight*cp.sum_entries(cp.pos(1 - cp.mul_elemwise(absLabels, absDataOrigin*beta+const)))\
                +cmpWeight*cp.sum_entries(cp.pos(1 - cp.mul_elemwise(cmpLabels, cmpDataOrigin*beta+const)))\
                +lamda*cp.norm(beta,1)
    prob = cp.Problem(cp.Minimize(objective))
    prob.solve(solver=cp.SCS)
    return beta.value, const.value
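A minimal call sketch for the function above (not taken from the original project), assuming numpy and cvxpy are imported as np and cp and a legacy cvxpy release (0.4.x) that still provides sum_entries and mul_elemwise; the data is synthetic.

import numpy as np

# Synthetic data: 50 absolutely labeled points and 30 comparison-labeled points in 5 dimensions.
rng = np.random.RandomState(0)
absData, absLab = rng.randn(50, 5), np.sign(rng.randn(50))
cmpData, cmpLab = rng.randn(30, 5), np.sign(rng.randn(30))

# Weight the absolute labels at 0.7 and apply a mild L1 penalty.
beta, const = SVM_SVM(absData, absLab, cmpData, cmpLab, absWeight=0.7, lamda=0.1)
print(beta, const)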
def prob_distribution_function(w1, w2):
    #trusts(a,b)->reachable(a,b)
    #reachable(a,b) & trusts(b,c) -> reachable(a,c)

    #print(state)
    #print(len(state))
    prob_distribution = 0

    query1 = '''
    SELECT * from rule1
    '''
    query2 = '''
    SELECT * from rule2
    '''
    query3 = '''
    SELECT * from reachable
    '''

    #print('\nRule1\n%s'%('='*10))
    rule1 = get_query_result(query1)
    #print(rule1)

    #print('\nRule2\n%s'%('='*10))
    rule2 = get_query_result(query2)
    #print(rule2)

    #print('\nReachables\n%s'%('='*10))
    reachables = get_query_result(query3)
    #print(reachables)

    vid_dict = dict()
    #print(len(reachables))

    i = 0
    for r in reachables:
        vid_dict[(r[0], r[1])] = cvxpy.Variable()
        i += 1
    #print (len(vid_dict))

    for r in rule1:
        if r[3] == None:
            reachable = vid_dict[(r[0], r[1])]
        else:
            reachable = float(r[3])
        prob_distribution += w1 * cvxpy.pos(reachable - float(r[2]))

    for r in rule2:
        if r[3] == None:
            reachable1 = vid_dict[(r[0], r[1])]
        else:
            reachable1 = float(r[3])
        if r[5] == None:
            reachable2 = vid_dict[(r[0], r[2])]
        else:
            reachable2 = float(r[5])
        prob_distribution += w2 * cvxpy.pos(
            cvxpy.pos(reachable1 + float(r[4]) - 1.0) - reachable2)

    return prob_distribution, vid_dict
def prob_distribution_function_network_design(w1,w2):
    # ~presents(a,b)
    # ~edge(a,b)->~presents(a,b)
    # edge(s,t1) & edge(t1,t2) & ~presents(t1,t2) -> ~presents(s,t1)
    # edge(s,t1) & edge(s,t2) & ~present(s,t2) -> ~presents(s,t1)
    
    prob_distribution = 0
    
    query1 = '''
    SELECT * from rule1
    '''
    query2 = '''
    SELECT * from rule2
    '''
    query3 = '''
    SELECT * from presents
    '''
    
    #print('\nRule1\n%s'%('='*10))
    rule1 = get_query_result(query1)
    #print(rule1)
    
    #print('\nRule2\n%s'%('='*10))
    rule2 = get_query_result(query2)
    #print(rule2)
    
    #print('\nPresents\n%s'%('='*10))
    presents = get_query_result(query3)
    #print(presents)
    
    vid_dict = dict()
    #print(len(reachables))
    
    i = 0 
    for r in presents:
        vid_dict [(r[0],r[1])] = cvxpy.Variable()
        i+=1
    #print (len(vid_dict))
    
    
    for r in rule1:
        if r[3]==None:
            present = vid_dict [(r[0],r[1])]
        else:
            present = float(r[3])
        prob_distribution+=w1 * cvxpy.pos(present - float(r[2]))
        
    for r in rule2:
        if r[3]==None:
            present1 = vid_dict [(r[0],r[1])]
        else:
            present1 = float(r[3])
        if r[5]==None:
            present2 = vid_dict [(r[0],r[2])]
        else:
            present2 = float(r[5])
        prob_distribution+=w2 * cvxpy.pos(cvxpy.pos(present1 + float(r[4])-1.0) - present2)
        
    return prob_distribution, vid_dict
 def switch(self):
     if self.phi_name == 'logistic':
         self.cvx_phi = lambda z: cvx.logistic(-z) / math.log(2, math.e)
     else:
         self.cvx_phi = lambda z: cvx.pos(1 - z)
     if self.kappa_name == 'Zafar_hinge':
         self.cvx_kappa = lambda z: cvx.pos(z)
     else:
         self.cvx_kappa = lambda z: z
Example #5
def newsvendor_opt(params, py):
    z = cp.Variable(1)
    d = params['d']
    f = (params['c_lin'] * z + 0.5 * params['c_quad'] * cp.square(z) + py.T *
         (params['b_lin'] * cp.pos(d - z) + 0.5 * params['b_quad'] *
          cp.square(cp.pos(d - z)) + params['h_lin'] * cp.pos(z - d) +
          0.5 * params['h_quad'] * cp.square(cp.pos(z - d))))
    fval = cp.Problem(cp.Minimize(f), [z >= 0]).solve()
    return z.value, fval
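A hedged usage sketch (values made up): params carries the demand scenarios d and the linear/quadratic ordering, back-order and holding cost coefficients read by the function, and py is a probability vector over those scenarios; it assumes the same cvxpy import (cp) and version conventions as the snippet above.

import numpy as np

params = {
    'd': np.array([1.0, 2.0, 3.0, 4.0]),  # demand scenarios
    'c_lin': 1.0, 'c_quad': 0.1,           # ordering cost
    'b_lin': 2.0, 'b_quad': 0.2,           # back-order (under-stock) cost
    'h_lin': 0.5, 'h_quad': 0.05,          # holding (over-stock) cost
}
py = np.ones(4) / 4.0                      # uniform distribution over the four scenarios

z_opt, fval = newsvendor_opt(params, py)
print(z_opt, fval)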
Example #6
def convert_constraint1(marketed, buy):
    #marketed(u) & user(u) ->buy(u)
    constraint = []
    obj_function = 0.0
    for uid, value in buy.items():
        obj_function += cvxpy.pos(marketed[uid] - buy[uid])
        constraint.append(cvxpy.pos(marketed[uid] - buy[uid]))
        #constraint.append(marketed[uid]-buy[uid]<=0)
    return constraint, obj_function
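A hedged driver sketch for the helper above: marketed holds observed truth values in [0, 1], buy maps the same user ids to cvxpy variables, and both dictionaries here are made up for illustration.

import cvxpy

user_ids = ['u1', 'u2', 'u3']
marketed = {'u1': 1.0, 'u2': 0.3, 'u3': 0.0}       # observed marketed(u) truth values
buy = {uid: cvxpy.Variable() for uid in user_ids}  # open buy(u) variables

hinge_terms, obj = convert_constraint1(marketed, buy)
bounds = [v >= 0 for v in buy.values()] + [v <= 1 for v in buy.values()]
prob = cvxpy.Problem(cvxpy.Minimize(obj), bounds)
prob.solve()
print({uid: float(v.value) for uid, v in buy.items()})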
    def prune_(self, proba, target, data=None):
        n_received = len(proba)
        if self.n_estimators >= n_received:
            return range(
                0, n_received), [1.0 / n_received for _ in range(n_received)]

        if self.alpha < 1:
            single_scores = Parallel(n_jobs=self.n_jobs, backend="threading")(
                delayed(self.single_metric)(i, proba, target)
                for i in range(n_received))
            q = np.array(single_scores)
        else:
            q = np.zeros((n_received, 1))

        if self.alpha > 0:
            pairwise_scores = Parallel(
                n_jobs=self.n_jobs, backend="threading")(
                    delayed(self.pairwise_metric)(i, j, proba, target)
                    for i in range(n_received) for j in range(i, n_received))

            # TODO This is probably easier and quicker with some fancy numpy operations
            P = np.zeros((n_received, n_received))
            s = 0
            for i in range(n_received):
                for j in range(i, n_received):
                    if i == j:
                        P[i, j] = pairwise_scores[s]
                    else:
                        P[i, j] = pairwise_scores[s]
                        P[j, i] = pairwise_scores[s]
                    s += 1
            P += self.eps * np.eye(n_received)

        else:
            P = np.zeros((n_received, n_received))

        w = cp.Variable(n_received, boolean=True)

        if self.alpha == 1:
            objective = cp.quad_form(w, P)
        elif self.alpha == 0:
            objective = q.T @ w
        else:
            objective = cp.pos((1.0 - self.alpha)) * q.T @ w + cp.pos(
                self.alpha) * cp.quad_form(w, P)

        prob = cp.Problem(cp.Minimize(objective), [
            atoms.affine.sum.sum(w) == self.n_estimators,
        ])
        prob.solve(verbose=self.verbose)
        selected = [i for i in range(n_received) if w.value[i]]
        weights = [1.0 / len(selected) for _ in selected]

        return selected, weights
    def switch(self):
        if self.phi_name == 'logistic':
            self.cvx_phi = lambda z: cvx.logistic(-z)  # / math.log(2, math.e)
        elif self.phi_name == 'hinge':
            self.cvx_phi = lambda z: cvx.pos(1 - z)
        elif self.phi_name == 'squared':
            self.cvx_phi = lambda z: cvx.square(-z)
        elif self.phi_name == 'exponential':
            self.cvx_phi = lambda z: cvx.exp(-z)
        else:
            logger.error('%s is not included' % self.phi_name)
            logger.info('Logistic is the default setting')
            self.cvx_phi = lambda z: cvx.logistic(-z)  # / math.log(2, math.e)

        if self.kappa_name == 'logistic':
            self.cvx_kappa = lambda z: cvx.logistic(z)  # / math.log(2, math.e)
            self.psi_kappa = lambda mu: ((1 + mu) * math.log(1 + mu) +
                                         (1 - mu) * math.log(3 - mu)) / 2
        elif self.kappa_name == 'hinge':
            self.cvx_kappa = lambda z: cvx.pos(1 + z)
            self.psi_kappa = lambda mu: mu
        elif self.kappa_name == 'squared':
            self.cvx_kappa = lambda z: cvx.square(1 + z)
            self.psi_kappa = lambda mu: mu**2
        elif self.kappa_name == 'exponential':
            self.cvx_kappa = lambda z: cvx.exp(z)
            self.psi_kappa = lambda mu: 1 - math.sqrt(1 - mu**2)
        else:
            logger.error('%s is not included' % self.kappa_name)
            logger.info('hinge is the default setting')
            self.cvx_kappa = lambda z: cvx.pos(1 + z)
            self.psi_kappa = lambda mu: mu

        if self.delta_name == 'logistic':
            self.cvx_delta = lambda z: 1 - cvx.logistic(
                -z)  # / math.log(2, math.e)
            self.psi_delta = lambda mu: ((1 + mu) * math.log(1 + mu) +
                                         (1 - mu) * math.log(1 - mu)) / 2
        elif self.delta_name == 'hinge':
            self.cvx_delta = lambda z: 1 - cvx.pos(1 - z)
            self.psi_delta = lambda mu: mu
        elif self.delta_name == 'squared':
            self.cvx_delta = lambda z: 1 - cvx.square(1 - z)
            self.psi_delta = lambda mu: mu**2
        elif self.delta_name == 'exponential':
            self.cvx_delta = lambda z: 1 - cvx.exp(-z)
            self.psi_delta = lambda mu: 1 - math.sqrt(1 - mu**2)
        else:
            logger.error('%s is not included' % self.delta_name)
            logger.info('hinge is the default setting')
            self.cvx_delta = lambda z: cvx.pos(1 + z)
            self.psi_delta = lambda mu: mu
Example #9
def convert_constraint2(reachable, buy):
    #reachable(u,v) & buy(u) -> buy(v)
    constraint = []
    obj_function = 0.0
    for edge, value in reachable.items():
        u = edge[0]
        v = edge[1]
        obj_function += cvxpy.pos(
            cvxpy.pos(float(value) + buy[u] - 1.0) - buy[v])
        constraint.append(
            cvxpy.pos(cvxpy.pos(float(value) + buy[u] - 1.0) - buy[v]))
        #constraint.append(float(value)+buy[u]-buy[v]<=1)
    return constraint, obj_function
Example #10
def svm_cvxpy(X, y, C, idx_train, idx_val):
    C = float(C)
    Xtrain, Xtest, ytrain, ytest = map(
        torch.from_numpy,
        [X[idx_train, :], X[idx_val], y[idx_train], y[idx_val]])

    n_samples_train, n_features = Xtrain.shape

    # set up variables and parameters
    beta_cp = cp.Variable(n_features)
    C_cp = cp.Parameter(nonneg=True)

    # set up objective
    loss = cp.sum_squares(beta_cp) / 2
    reg = C_cp * cp.sum(cp.pos(1 - cp.multiply(ytrain, Xtrain @ beta_cp)))
    objective = loss + reg

    # define problem
    problem = cp.Problem(cp.Minimize(objective))
    assert problem.is_dpp()

    # solve problem
    layer = CvxpyLayer(problem, parameters=[C_cp], variables=[beta_cp])
    C_th = torch.tensor(C, requires_grad=True)
    beta_, = layer(C_th)

    # get test loss and its gradient
    test_loss = (Xtest @ beta_ - ytest).pow(2).mean()
    test_loss.backward()

    val = test_loss.detach().numpy()
    grad = np.array(C_th.grad)
    return val, grad
Example #11
def CVX_Slope(X_train, y_train, llambda, k_Slope, solver):
    start_time = time.time()
    N, P = X_train.shape

    beta = cvx.Variable(P)
    beta0 = cvx.Variable()
    loss = cvx.sum(cvx.pos(1 - cvx.multiply(y_train, X_train * beta + beta0)))
    reg1 = cvx.norm(beta, 1)

    #for j in range(min(P-1, K_max)): reg2  += (lambda_arr[j]-lambda_arr[j+1]) * sum_largest(abs(beta), j+1)
    reg2 = cvx.sum_largest(cvx.abs(beta), k_Slope)

    prob = cvx.Problem(cvx.Minimize(loss + llambda * reg1 + llambda * reg2))
    dict_solver = {'gurobi': cvx.GUROBI, 'ecos': cvx.ECOS}

    prob.solve(solver=dict_solver[solver])

    ### Solution
    support = np.where(np.round(beta.value, 6) != 0)[0]
    print '\nLen support ' + solver + ' = ' + str(len(support))

    ### Obj val
    obj_val = prob.value
    print 'Obj value ' + solver + '  = ' + str(obj_val)

    ### Time stops
    time_cvx = time.time() - start_time
    print 'Time CVX ' + solver + ' = ' + str(round(time_cvx, 2))
Example #12
 def primal_expr_Ax(self, A, x_var, voxel_weights=None):
     residuals = cvxpy.pos((x_var.T * A.T).T - float(self.deadzone_dose))
     if voxel_weights is not None:
         voxel_weights = np.reshape(voxel_weights,
                                    residuals.shape)  # kelsey added
         residuals = cvxpy.multiply(voxel_weights, residuals)
     return self.weight * cvxpy.sum(residuals)
def LESS(signal, G,  rho, weightFactors):

    epsilon = 1e-5
    n = len(G.nodes())
    ## LESS
    current_obj = -np.inf
    current_sol = None
    inci = nx.incidence_matrix(G, oriented=True).T
    for t in range(1, n+1):
        x_opt = cvx.Variable(n)
        y = signal
        v = inci * x_opt
        obj = x_opt.T * y / np.sqrt(t)
        constraints = [x_opt >= 0,
                       x_opt <= 1,
                       cvx.sum(x_opt) <= t,
                       cvx.norm(cvx.pos(v), 1) <= rho]
        prob = cvx.Problem(cvx.Maximize(obj), constraints)
        #try:
        prob.solve(solver=cvx.SCS)
        #except:
        #    print("Cannot solve! because: ", prob.status)
        #    continue

        if obj.value > current_obj:
            current_obj = obj.value
            current_sol = x_opt.value
    
    if current_sol is not None:
        LESS_subset = np.where(current_sol > epsilon)[0].tolist()
        LESS_sol    = (LESS_subset, cal_removal_loss(LESS_subset, G, weightFactors))
    else:
        LESS_sol = (None, rho)
    return LESS_sol
Example #14
def soiling_seperation_algorithm(observed, iterations=5, weights=None,
                                 index_set=None, tau=0.85):
    if weights is None:
        weights =  np.ones_like(observed)
    if index_set is None:
        index_set = ~np.isnan(observed)
    zero_set = np.zeros(len(observed) - 1, dtype=bool)
    eps = .01
    n = len(observed)
    s1 = cvx.Variable(n)
    s2 = cvx.Variable(n)
    s3 = cvx.Variable(n)
    w = cvx.Parameter(n - 2, nonneg=True)
    w.value = np.ones(len(observed) - 2)
    for i in range(iterations):
        # cvx.norm(cvx.multiply(s3, weights), p=2) \
        cost = .1 * cvx.sum(tau * cvx.pos(s3) +(1 - tau) * cvx.neg(s3)) \
               + 10 * cvx.norm(cvx.diff(s2, k=2), p=2) \
               + .2 * cvx.norm(cvx.multiply(w, cvx.diff(s1, k=2)), p=1)
        objective = cvx.Minimize(cost)
        constraints = [
            observed[index_set] == s1[index_set] + s2[index_set] + s3[index_set],
            s2[365:] - s2[:-365] == 0,
            cvx.sum(s2[:365]) == 0
            # s1 <= 1
        ]
        if np.sum(zero_set) > 0:
            constraints.append(cvx.diff(s1, k=1)[zero_set] == 0)
        problem = cvx.Problem(objective, constraints)
        problem.solve(solver='MOSEK')
        w.value = 1 / (eps + 1e2* np.abs(cvx.diff(s1, k=2).value))   # Reweight the L1 penalty
        zero_set = np.abs(cvx.diff(s1, k=1).value) <= 5e-5     # Make nearly flat regions exactly flat (sparse 1st diff)
    return s1.value, s2.value, s3.value
Example #15
def psl_objective(var_ids, vid_dict, r_list):
    constraints = []

    for vid in var_ids:
        var = cvxpy.Variable()
        vid_dict[vid] = var
        constraints += [0 <= var, var <= 1]

    f = 0
    for weight, body, head in r_list:
        expr = 1
        for b in body:
            if b[0]:
                y = b[1]
            else:
                y = vid_dict[b[1]]
            if b[2]:
                expr -= y
            else:
                expr -= (1 - y)
        for h in head:
            if h[0]:
                y = h[1]
            else:
                y = vid_dict[h[1]]
            if h[2]:
                expr -= (1 - y)
            else:
                expr -= y
        f += weight * cvxpy.pos(expr)

    return f, constraints
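A hedged toy instance of the objective builder above, reading the rule format off the loops: each rule is (weight, body, head); each atom is a triple whose first element says whether it is an observed constant (True) or a variable id (False), and whose last element controls whether the atom enters as written or negated. The two-variable rule below is made up for illustration.

import cvxpy

vid_dict = {}
var_ids = ['A', 'B']

# One weighted rule, roughly "A -> B": the body atom references variable 'A',
# the head atom references variable 'B'; the False flags keep both atoms
# un-negated, so the hinge term reduces to pos(A - B) per the loop above.
r_list = [(2.0,
           [(False, 'A', False)],
           [(False, 'B', False)])]

f, constraints = psl_objective(var_ids, vid_dict, r_list)
prob = cvxpy.Problem(cvxpy.Minimize(f), constraints)
prob.solve()
print(vid_dict['A'].value, vid_dict['B'].value)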
Example #16
 def train(self, X, Y, trials=1, store_data=True):
     # set info from data
     if store_data:
         self.X, self.Y, self.trials = X, Y, trials
     m, n = X.shape[0], X.shape[1]
     # setup optimization variables
     w = cp.Variable((n, 1))
     b = cp.Variable()
     C = cp.Parameter(nonneg=True)
     # setup optimization problem
     loss = cp.sum(cp.pos(1 - cp.multiply(Y, X * w - b)))
     reg = 0.5 * cp.norm(w, 2)
     prob = cp.Problem(cp.Minimize(reg + C * loss))
     C_vals = np.logspace(-2, 0, trials)
     w_vals, b_vals = [], []
     min_error, min_error_i = 1, 0
     # try out a bunch of different C values to find the best one
     for i in range(trials):
         C.value = C_vals[i]
         prob.solve()
         train_error = (Y != np.sign(X.dot(w.value) - b.value)).sum() / m
         if train_error < min_error:
             min_error, min_error_i = train_error, i
         w_vals.append(w.value)
         b_vals.append(b.value)
     # find smallest error
     self.w = w_vals[min_error_i]
     self.b = b_vals[min_error_i]
     self.C = C_vals[min_error_i]
     # store all values
     self.w_vals = w_vals
     self.b_vals = b_vals
     self.C_vals = C_vals
Example #17
    def __learnSeparator(self, X, y, lambda_reg, verbose=False):
        solverArg = self.get_params()['solver']

        if solverArg == "cvxpy":
            #            print("cvxpy used")
            alphaVar = cvx.Variable(X.shape[1])
            loss = cvx.sum(cvx.pos(1 - (y[:, None] * X) @ alphaVar))
            reg = cvx.norm1(alphaVar)
            prob = cvx.Problem(objective=cvx.Minimize(loss + lambda_reg * reg))
            try:
                prob.solve(solver='SCS')
                alpha = np.array(alphaVar.value).flatten()
            except Exception:
                print("Warning: SCS solver failed, switching to SGD...")
                solverArg = "sgd"

        # stochastic gradient descent: for large data sets
        if solverArg == "sgd":
            #            print("sgd used")
            sgdLinClassifier = SGDClassifier(
                loss='hinge',
                penalty='l1',
                fit_intercept=False,
                alpha=lambda_reg,  #tol= 1e-30, verbose = False, 
                # learning_rate= 'optimal', eta0= 1e-6, shuffle= True,
                # max_iter= np.ceil(1e6 / len(y)), # empirical rule of thumb, given in the sklearn documentation
                # verbose= 10,# average= 10,
            )

            sgdLinClassifier.fit(X, y)
            alpha = sgdLinClassifier.coef_.flatten()
        # print(lambda_reg)
        # print("classifier solved !")
        return alpha
 def P(self, s_id, expr_):
   m_p = self.sessions_beingserved_dict[s_id]['app_pref_dict']['m_p']
   x_p = self.sessions_beingserved_dict[s_id]['app_pref_dict']['x_p']
   # return m_p*(expr_-x_p)
   # return cp.max_elemwise( *(m_p*(expr_-x_p), 0) )
   # return expr_
   return m_p*cp.pos(expr_ - x_p)
Example #19
 def fit(self, X, y, c=1):
     """
     Train a SVM classifier using data (using Primal problem).
     """
     self.X = X
     self.y = y
     # parameters
     D = X.shape[1]  # number of features
     N = X.shape[0]  # number of samples
     # instantiate variables in cvxpy
     W = cvx.Variable(D)
     b = cvx.Variable()
     # loss function of the primal SVM
     loss = (
         0.5 * cvx.sum_squares(W) +
         c * cvx.sum_entries(cvx.pos(1 - cvx.mul_elemwise(y, X * W + b))))
     # define loss function manually
     # loss = 0.5 * cvx.sum_squares(W)
     # for i in range(N):
     # loss += c * cvx.pos(1 - y[i] * (X[i]*W+b))
     # need to minimize loss/N to avoid error
     prob = cvx.Problem(cvx.Minimize(loss / N))
     prob.solve()
     # save results
     self.W = W.value
     self.b = b.value
     return self.W, self.b
def opt_cvx(
        x_counts: Sequence[np.ndarray],
        sizes: Sequence[int],
        n_iter: int=10
) -> np.ndarray:
    n = len(sizes)
    Bs = cvx.Variable(n)
    constraints = [
        Bs >= 0
    ]
    term2 = 0
    for i in range(n):
        x_count = x_counts[i]
        size = sizes[i]
        term2 += cvx.square(cvx.sum(cvx.pos(x_count - Bs[i])) / size)
    o = cvx.Minimize(
        4 * cvx.square(cvx.sum(Bs)) + term2
    )
    prob = cvx.Problem(o, constraints)
    sol = prob.solve(solver=cvx.ECOS)
    b_values = Bs.value
    n_adj = np.zeros(n)
    for i in range(n):
        n_adj[i] = n_bias(x_counts[i], b_values[i]) / sizes[i]
    print("Cost: {}".format(cost(b_values, n_adj)))
    return np.round(b_values)
Example #21
def LinearSVM_Primal(X, y, C):
    w = cp.Variable((2, 1))
    b = cp.Variable()
    y_t = y.reshape(-1, 1)
    loss = cp.sum(cp.pos(1 - cp.multiply(y_t, X @ w - b)))
    reg = cp.norm(w, 1)
    prob = cp.Problem(cp.Minimize(C * loss + 1 / 2 * reg))
    prob.solve()

    w_c = cp.Variable(2)
    b_c = cp.Variable(1)
    constraint_hold = []
    for i in range(len(X)):
        slack_comp = cp.abs(w_c @ X[i] + b_c) / cp.norm(w_c, 2)
        cons = y[i] * slack_comp
        constraint_hold.append(cons)
    constraints = [0 <= constraint_hold[i] for i in range(len(X))]
    func = 1 / 2 * cp.norm(w_c, 2)
    for i in range(len(X)):
        func += C * cp.abs(w_c @ X[i] + b_c) / cp.norm(w_c, 2)
    objective = cp.Minimize(func)
    prob = cp.Problem(objective, constraints)
    prob.solve()
    w = w_c.value
    b = b_c.value
    return w, b
def _single_d_conic_(p, d, Q, G, h, A, b, T=24, sol_opt=cp.SCS, verbose=0):
    if d.shape == (T,):
        d = np.expand_dims(d, 1)

    Diff_coef_ = np.concatenate([np.eye(T), -np.eye(T)], axis=1)

    x_ = cp.Variable(3 * T)
    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + p.T * cp.pos(Diff_coef_ * x_[0:(2 * T), 0] + d))
    ineqCon = G * x_ <= h
    eqCon = A * x_ == b
    cons = [ineqCon, eqCon]
    prob = cp.Problem(obj, cons)
    A_, b_, c_, cone_dims = scs_data_from_cvxpy_problem(prob, cp_SCS=sol_opt)

    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A_, b_, c_, cone_dims, eps=1e-5)

    x_hat = x[:(3 * T)]
    # dx, dy, ds = derivative(A_, b_, c_, atol=1e-4, btol=1e-4)
    # dA, db, dc = adjoint_derivative(dx, np.zeros(y.size), np.zeros(s.size))
    dA, db, dc = adjoint_derivative(c_, np.zeros(y.size), np.zeros(s.size))
    # print("c_ toward db ", db[T:2*(T)])
    # dA, db, dc = adjoint_derivative(np.ones(c_.size), np.zeros(y.size), np.zeros(s.size))
    # print("ones toward dc ", db[T:2*(T)])
    # the demand d was converted in the Ax=b format b = [0,.., d, 0,...]
    return x_hat, db[T:2*(T)]
Example #23
 def setObjective(self):
     y_ijt = {}
     for i in range(self.States):
         for j in range(self.Actions):
             for t in range(self.Time):
                 y_ijt[(i, j, t)] = cvx.Variable()
     if self.isQuad:
         print "quadratic objective"
         objF = sum([sum([sum([-0.5*cvx.pos(self.R[i,j,t])*cvx.square(y_ijt[(i,j,t)])
                      for i in range(self.States) ])
                 for j in range(self.Actions)])
            for t in range(self.Time)]) \
                + sum([sum([sum([(self.C[i,j,t])*y_ijt[(i,j,t)]
                      for i in range(self.States) ])
                 for j in range(self.Actions)])
            for t in range(self.Time)])
     else:
         objF = -sum([
             sum([
                 sum([
                     y_ijt[(i, j, t)] * self.R[i, j, t]
                     for i in range(self.States)
                 ]) for j in range(self.Actions)
             ]) for t in range(self.Time)
         ])
     self.yijt = y_ijt
     self.lpObj = objF
def forward_single_d_conic_solve_Filter(Q, q, G, h, A, b, d, epsilon, xi, delta=0.01,
                                        T=48, p=None, sol_opt=cp.CVXOPT, verbose=False):
    nz, neq, nineq = q.shape[0], A.shape[0] if A is not None else 0, G.shape[0]

    if p.shape == (T,):
        p = np.expand_dims(p, 1)  # convert the price into a column vector

    if d.shape == (T,):
        d = np.expand_dims(d, 1)

    if verbose:
        print("\n inside the cvx np filter :", T, nz)
        print([part.shape for part in [Q, q, G, h, A, b]])

    x_ = cp.Variable(nz)
    # GAMMA = cp.Semidef(T)
    GAMMA = cp.Variable(rows=T, cols=T)
    # assert T == nz / 3
    # print("x size {}, num of ineq {}".format(x_.size, nineq))
    term1 = GAMMA * epsilon + d

    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + q.T * x_ + p.T * cp.pos(term1) + cp.pos(cp.norm(GAMMA, "nuc") - xi))
    eqCon = A * x_ == b if neq > 0 else None
    prob_ineqCon = [cp.norm(GAMMA[:, i], 2) <= (d[i, 0] / abs(ut.function_normal_cdf_inv(delta))) for i in
                    range(T)]  # ut.function_normal_cdf_inv(delta)

    eqCon_sdp = None  # convert the SDP constraint in the objective, # eqCon_sdp = cp.norm(GAMMA, "nuc") == xi
    if nineq > 0:
        slacks = cp.Variable(nineq)  # define slack variables
        ineqCon = G * x_ + slacks == h
        slacksCon = slacks >= 0
    else:
        ineqCon = slacks = slacksCon = None
    cons_collected = [eqCon, eqCon_sdp, ineqCon] + prob_ineqCon + [slacksCon]

    cons = [constraint for constraint in cons_collected if constraint is not None]
    prob = cp.Problem(obj, cons)

    A_, b_, c_, cone_dims = scs_data_from_cvxpy_problem(prob, cp_SCS=sol_opt)

    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A_, b_, c_, cone_dims, eps=1e-5)
    # end = time.perf_counter()
    # print("[DIFFCP] Compute solution and set up derivative: %.4f s." % (end - start))

    return x, y, s, derivative, adjoint_derivative, A_, b_, c_
Example #25
 def choose_phi(self, phi_name):
     if phi_name == 'logistic':
         self.cvx_phi = lambda z: cvx.logistic(-z)
     elif phi_name == 'hinge':
         self.cvx_phi = lambda z: cvx.pos(1.0 - z)
     elif phi_name == 'exponential':
         self.cvx_phi = lambda z: cvx.exp(-z)
     else:
         print("Your surrogate function doesn't exist.")
Example #26
def get_constr_error(constr):
    if isinstance(constr, cp.constraints.Equality):
        error = cp.abs(constr.args[0] - constr.args[1])
    elif isinstance(constr, cp.constraints.Inequality):
        error = cp.pos(constr.args[0] - constr.args[1])
    elif isinstance(constr, cp.constraints.PSD):
        mat = constr.args[0] - constr.args[1]
        error = cp.neg(cp.lambda_min(mat + mat.T) / 2)
    return cp.sum(error)
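A small evaluation sketch for the helper above (cvxpy 1.x constraint classes, as in the snippet): set a candidate value on the variable by hand and read off the constraint-violation expression.

import numpy as np
import cvxpy as cp

x = cp.Variable(2)
x.value = np.array([2.0, 0.5])   # candidate point, assigned by hand

viol = get_constr_error(x <= 1)  # Inequality branch: sum of pos(x - 1)
print(viol.value)                # 1.0 for the point above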
def solve_l1_svm(X, y, C):
    import cvxpy as cp
    n, m = X.shape
    w = cp.Variable((m,1))
    loss = cp.sum(cp.pos(1 - cp.multiply(y, X @ w)))
    reg = cp.norm(w, 1)
    prob = cp.Problem(cp.Minimize(loss/n + C*reg))
    prob.solve(solver="GUROBI")
    return w.value
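A hedged usage sketch with synthetic data; note the snippet hard-codes solver="GUROBI", so this assumes a GUROBI installation and licence are available to cvxpy.

import numpy as np

rng = np.random.RandomState(0)
n, m = 100, 5
X = rng.randn(n, m)
w_true = np.array([1.5, -2.0, 0.0, 0.0, 0.5])
y = np.sign(X @ w_true).reshape(-1, 1)   # labels in {-1, +1}, as a column to match cp.multiply

w_hat = solve_l1_svm(X, y, C=0.1)
print(w_hat.ravel())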
Example #28
def get_constr_error(constr):
    if isinstance(constr, cvx.constraints.EqConstraint):
        error = cvx.abs(constr.args[0] - constr.args[1])
    elif isinstance(constr, cvx.constraints.LeqConstraint):
        error = cvx.pos(constr.args[0] - constr.args[1])
    elif isinstance(constr, cvx.constraints.PSDConstraint):
        mat = constr.args[0] - constr.args[1]
        error = cvx.neg(cvx.lambda_min(mat + mat.T)/2)
    return cvx.sum_entries(error)
Example #29
    def _stamp_preference_constraints(self,
                                      X,
                                      x_sensitive,
                                      w,
                                      cons_type,
                                      s_val_to_cons_sum=None):
        """
            No need to pass s_val_to_cons_sum for preferred treatment (envy free) constraints
            # 1 - pref imp, 2 - EF, 3 - pref imp & EF
        """

        assert cons_type in self.VALID_PREFERED_CONSTRAINTS
        assert cons_type == self.CONSTRAINT_PREFERED_TREATMENT or s_val_to_cons_sum is not None
        assert set(x_sensitive) == w.keys()

        group_labels = set(x_sensitive)
        prod_dict = {}
        for z in group_labels:
            idx = x_sensitive == z
            Xz = X[idx, :]
            nz = float(Xz.shape[0])
            if self.sparse_formulation:
                Uz, mz = np.unique(
                    Xz, axis=0, return_counts=True
                )  # dropping duplicates (and counting them) so that we have fewer constraints
                Tz = Uz * mz[:, np.newaxis]
                prod_dict[z] = {
                    o: cp.sum(cp.pos(Tz * wo)) / nz
                    for o, wo in w.items()
                }
            else:
                prod_dict[z] = {
                    o: cp.sum(cp.maximum(0.0, Xz * wo)) / nz
                    for o, wo in w.items()
                }

        constraints = []
        if cons_type == self.CONSTRAINT_PREFERED_IMPACT:
            for z in group_labels:
                constraints.append(prod_dict[z][z] >= s_val_to_cons_sum[z][z])

        elif cons_type == self.CONSTRAINT_PREFERED_TREATMENT:
            for z in group_labels:
                other_groups = set(group_labels) - {z}
                for o in other_groups:
                    constraints.append(prod_dict[z][z] >= prod_dict[z][o])

        elif cons_type == self.CONSTRAINT_PREFERED_BOTH:
            for z in group_labels:
                constraints.append(prod_dict[z][z] >=
                                   s_val_to_cons_sum[z][z])  #preferred impact
                other_groups = set(group_labels) - {z}
                for o in other_groups:
                    constraints.append(prod_dict[z][z] >= prod_dict[z][o])

        return constraints
Example #30
 def _genObjectiveFunc(self, currentPosition, lookbackPeriod=None):
     if lookbackPeriod is not None:
         returns = self.returns.reindex(lookbackPeriod).fillna(0.0).values
     else:
         returns = self.returns.fillna(0.0).values
     T, _ = returns.shape
     CVaR = self.aux + cvx.sum(
         cvx.pos(-(currentPosition + self.cvxTrades) @ returns.T -
                 self.aux)) / T / (1 - self.cvarConf)
     self.cvxObjective = cvx.Minimize(CVaR)
Example #31
def solve_svm(X, y, C, sample_weight=None, solver_kws={}):
    """
    Solves soft-margin SVM problem.

    min_{beta, intercept}
    (1/n) * sum_{i=1}^n [1 - y_i * (x_i^T beta + intercept)]_+ + C * ||beta||_1

    Parameters
    ----------
    X: (n_samples, n_features)

    y: (n_samples, )

    C: float
        Strictly positive tuning parameter.

    sample_weight: None, (n_samples, )
        Weights for samples.

    solver_kws: dict
        Keyword arguments to cp.solve

    Output
    ------
    beta, intercept, problem

    beta: (n_features, )
        SVM normal vector.

    intercept: float
        SVM intercept.

    problem: cp.Problem

    y_hat = np.sign(x.dot(beta) + intercept)
    """
    if sample_weight is not None:
        raise NotImplementedError

    n_samples, n_features = X.shape
    y = y.reshape(-1, 1)

    beta = cp.Variable((n_features, 1))
    intercept = cp.Variable()
    C = cp.Parameter(value=C, nonneg=True)

    # TODO: should we make this + intercept
    loss = cp.sum(cp.pos(1 - cp.multiply(y, X * beta + intercept)))
    reg = cp.norm(beta, 1)
    objective = loss / n_samples + C * reg

    problem = cp.Problem(cp.Minimize(objective))
    problem.solve(**solver_kws)

    return beta.value, intercept.value, problem
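A hedged usage sketch with synthetic data; since the objective above is written with the older X * beta matrix-multiplication syntax, this assumes a cvxpy version that still accepts it.

import numpy as np

rng = np.random.RandomState(0)
n, p = 200, 10
X = rng.randn(n, p)
beta_true = np.zeros(p)
beta_true[:3] = [2.0, -1.0, 0.5]
y = np.sign(X @ beta_true + 0.1 * rng.randn(n))

beta, intercept, problem = solve_svm(X, y, C=0.05)
y_hat = np.sign(X @ beta.ravel() + intercept)
print("train accuracy:", np.mean(y_hat == y))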
Example #32
		def __percentile_constraint_restricted(A, x, constr, beta, slack=None):
			r"""
			Form convex restriction to DVH constraint.

			Upper constraint:

			:math:`\sum (\beta + (Ax - d + s))_+ \le \beta \phi`

			Lower constraint:

			:math:`\sum (\beta - (Ax - d - s))_+ \le \beta \phi`

			.. math::

			   \mbox{Here, $d$ is a target dose, $s$ is a nonnegative
			   slack variable, and $\phi$ is a voxel limit based on the
			   structure size and the constraint's percentile
			   threshold.}

			Arguments:
				A: Structure-specific dose matrix to use in constraint.
				x (:class:`cvxpy.Variable`): Beam intensity variable.
				constr (:class:`PercentileConstraint`): Dose constraint.
				slack (:obj:`bool`, optional): If ``True``, include
					slack variable in constraint formulation.

			Returns:
				:class:`cvxpy.Constraint`: :mod:`cvxpy` representation
				of convex restriction to dose constraint.

			Raises:
				TypeError: If ``constr`` not of type
					:class:`PercentileConstraint`.
			"""
			if not isinstance(constr, PercentileConstraint):
				raise TypeError('parameter constr must be of type {}'
								'Provided: {}'
								''.format(PercentileConstraint, type(constr)))

			sign = 1 if constr.upper else -1
			fraction = float(sign < 0) + sign * constr.percentile.fraction
			p = fraction * A.shape[0]
			dose = constr.dose.value
			if slack is None:
				slack = 0.
			return cvxpy.sum_entries(cvxpy.pos(
					beta + sign * (A*x - (dose + sign * slack)) )) <= beta * p
Example #33
File: svm.py Project: pratikac/16.763
n = 10
data = []
for i in range(N):
    data += [(1, cvxopt.normal(n, mean=1.0, std=2.0))]
for i in range(M):
    data += [(-1, cvxopt.normal(n, mean=-1.0, std=2.0))]

# Construct problem.
gamma = cp.Parameter(sign="positive")
gamma.value = 0.1
# 'a' is a variable constrained to have at most 6 non-zero entries.
#a = mi.SparseVar(n, nonzeros=6)
a = cp.Variable(n)
b = cp.Variable()

slack = [cp.pos(1 - label*(sample.T*a - b)) for (label, sample) in data]
objective = cp.Minimize(cp.norm(a, 2) + gamma*sum(slack))
p = cp.Problem(objective)
# Extensions can attach new solve methods to the CVXPY Problem class.
p.solve(method="admm")

# Count misclassifications.
errors = 0
for label, sample in data:
    if label*(sample.T*a - b).value < 0:
        errors += 1

print "%s misclassifications" % errors
print a.value
print b.value
Example #34
File: devices.py Project: mwytock/dem
 def cost(self):
     return self.alpha*cvx.pos(self.power - self.terminals[0].power_var)
Example #35
    def test_readme_examples(self):
        import cvxopt
        import numpy

        # Problem data.
        m = 30
        n = 20
        A = cvxopt.normal(m,n)
        b = cvxopt.normal(m)

        # Construct the problem.
        x = cp.Variable(n)
        objective = cp.Minimize(sum(cp.square(A*x - b)))
        constraints = [0 <= x, x <= 1]
        p = cp.Problem(objective, constraints)

        # The optimal objective is returned by p.solve().
        result = p.solve()
        # The optimal value for x is stored in x.value.
        print x.value
        # The optimal Lagrange multiplier for a constraint
        # is stored in constraint.dual_value.
        print constraints[0].dual_value

        ####################################################

        # Scalar variable.
        a = cp.Variable()

        # Column vector variable of length 5.
        x = cp.Variable(5)

        # Matrix variable with 4 rows and 7 columns.
        A = cp.Variable(4, 7)

        ####################################################

        # Positive scalar parameter.
        m = cp.Parameter(sign="positive")

        # Column vector parameter with unknown sign (by default).
        c = cp.Parameter(5)

        # Matrix parameter with negative entries.
        G = cp.Parameter(4, 7, sign="negative")

        # Assigns a constant value to G.
        G.value = -numpy.ones((4, 7))

        # Raises an error for assigning a value with invalid sign.
        with self.assertRaises(Exception) as cm:
            G.value = numpy.ones((4,7))
        self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.")

        ####################################################
        a = cp.Variable()
        x = cp.Variable(5)

        # expr is an Expression object after each assignment.
        expr = 2*x
        expr = expr - a
        expr = sum(expr) + cp.norm(x, 2)

        ####################################################

        import numpy as np
        import cvxopt
        from multiprocessing import Pool

        # Problem data.
        n = 10
        m = 5
        A = cvxopt.normal(n,m)
        b = cvxopt.normal(n)
        gamma = cp.Parameter(sign="positive")

        # Construct the problem.
        x = cp.Variable(m)
        objective = cp.Minimize(sum(cp.square(A*x - b)) + gamma*cp.norm(x, 1))
        p = cp.Problem(objective)

        # Assign a value to gamma and find the optimal x.
        def get_x(gamma_value):
            gamma.value = gamma_value
            result = p.solve()
            return x.value

        gammas = np.logspace(-1, 2, num=2)
        # Serial computation.
        x_values = [get_x(value) for value in gammas]

        ####################################################
        n = 10

        mu = cvxopt.normal(1, n)
        sigma = cvxopt.normal(n,n)
        sigma = sigma.T*sigma
        gamma = cp.Parameter(sign="positive")
        gamma.value = 1
        x = cp.Variable(n)

        # Constants:
        # mu is the vector of expected returns.
        # sigma is the covariance matrix.
        # gamma is a Parameter that trades off risk and return.

        # Variables:
        # x is a vector of stock holdings as fractions of total assets.

        expected_return = mu*x
        risk = cp.quad_form(x, sigma)

        objective = cp.Maximize(expected_return - gamma*risk)
        p = cp.Problem(objective, [sum(x) == 1])
        result = p.solve()

        # The optimal expected return.
        print expected_return.value

        # The optimal risk.
        print risk.value

        ###########################################

        N = 50
        M = 40
        n = 10
        data = []
        for i in range(N):
            data += [(1, cvxopt.normal(n, mean=1.0, std=2.0))]
        for i in range(M):
            data += [(-1, cvxopt.normal(n, mean=-1.0, std=2.0))]

        # Construct problem.
        gamma = cp.Parameter(sign="positive")
        gamma.value = 0.1
        # 'a' is a variable constrained to have at most 6 non-zero entries.
        a = cp.Variable(n)#mi.SparseVar(n, nonzeros=6)
        b = cp.Variable()

        slack = [cp.pos(1 - label*(sample.T*a - b)) for (label, sample) in data]
        objective = cp.Minimize(cp.norm(a, 2) + gamma*sum(slack))
        p = cp.Problem(objective)
        # Extensions can attach new solve methods to the CVXPY Problem class.
        #p.solve(method="admm")
        p.solve()

        # Count misclassifications.
        errors = 0
        for label, sample in data:
            if label*(sample.T*a - b).value < 0:
                errors += 1

        print "%s misclassifications" % errors
        print a.value
        print b.value

    # # Risk return tradeoff curve
    # def test_risk_return_tradeoff(self):
    #     from math import sqrt
    #     from cvxopt import matrix
    #     from cvxopt.blas import dot
    #     from cvxopt.solvers import qp, options
    #     import scipy

    #     n = 4
    #     S = matrix( [[ 4e-2,  6e-3, -4e-3,   0.0 ],
    #                  [ 6e-3,  1e-2,  0.0,    0.0 ],
    #                  [-4e-3,  0.0,   2.5e-3, 0.0 ],
    #                  [ 0.0,   0.0,   0.0,    0.0 ]] )
    #     pbar = matrix([.12, .10, .07, .03])

    #     N = 100
    #     # CVXPY
    #     Sroot = numpy.asmatrix(scipy.linalg.sqrtm(S))
    #     x = cp.Variable(n, name='x')
    #     mu = cp.Parameter(name='mu')
    #     mu.value = 1 # TODO cp.Parameter("positive")
    #     objective = cp.Minimize(-pbar*x + mu*quad_over_lin(Sroot*x,1))
    #     constraints = [sum(x) == 1, x >= 0]
    #     p = cp.Problem(objective, constraints)

    #     mus = [ 10**(5.0*t/N-1.0) for t in range(N) ]
    #     xs = []
    #     for mu_val in mus:
    #         mu.value = mu_val
    #         p.solve()
    #         xs.append(x.value)
    #     returns = [ dot(pbar,x) for x in xs ]
    #     risks = [ sqrt(dot(x, S*x)) for x in xs ]

    #     # QP solver
Example #36
 def loss(self, A, U): return cp.sum_entries(cp.pos(ones(A.shape)-cp.mul_elemwise(cp.Constant(A), U)))
 def decode(self, A): return sign(A) # return back to Boolean
Example #37
    constraints.append(
    cvx.sum_entries(N[:,j]) == I[j] )
prob = cvx.Problem(obj, constraints)
prob.solve(solver=cvx.SCS)

s1 = cvx.pos(q-cvx.diag(Acontr.T*N*Tcontr));
optimal_net_profit = prob.value
optimal_penalty = p.T*s1
optimal_revenue = optimal_penalty + optimal_net_profit
print "status:", prob.status
print "optimal net profit:", optimal_net_profit
print "optimal penalty:", prob.value
print "optimal revenue:", prob+p.T*prob.value
'''
# Greedy approach, ignoring contracts
Nignore = cvx.Variable(n,T)
obj2 = cvx.Maximize(cvx.sum_entries(cvx.mul_elemwise(R, Nignore))) #np.reshape(R,n*T).T *Nignore[:,:])
constraints2 = [Nignore >= 0]
for j in range(T):
    constraints2.append(
    cvx.sum_entries(Nignore[:,j]) == I[j] )
prob2 = cvx.Problem(obj2, constraints2)
prob2.solve()

s2 = cvx.pos(q-cvx.diag(Acontr.T*Nignore*Tcontr))
greedy_net_profit = prob2.value - p.T*s2
greedy_penalty =  p.T*s2
print "greedy status:", prob2.status
print "greedy net_profit:", greedy_net_profit.value
print "greedy penalty:", greedy_penalty.value
print "greedy revenue:", prob2.value
Example #38
 def loss(self, A, U):
     return cp.sum_entries(sum(cp.mul_elemwise(1*(b >= A),\
             cp.pos(U-b*ones(A.shape))) + cp.mul_elemwise(1*(b < A), \
             cp.pos(-U + (b+1)*ones(A.shape))) for b in range(int(self.Amin), int(self.Amax))))