def q3():
    def K(x1,x2):
        return (1+ np.inner(x1[:-1],x2[:-1]))**2
    
    Q = np.zeros((len(data),len(data)))
    
    for i in range(len(data)):
        for j in range(len(data)):
            Q[i,j]= data[i,-1]*data[j,-1]*K(data[i],data[j])
    alpha = cvx.Variable(len(data))
    
    obj = cvx.Minimize(0.5*cvx.quad_form(alpha,Q)-cvx.sum_entries(alpha))
    constraint = [cvx.sum_entries(cvx.mul_elemwise(data[:,-1],alpha)) == 0, alpha>=0]
    prob = cvx.Problem(obj,constraint)
    prob.solve()
    ret = alpha.value
    
    svid = next(i for i,x in enumerate(ret) if x>1e-5)    
    
    b = data[svid,-1] - sum(ret[i,0]*data[i,-1]*K(data[i],data[svid]) for i in range(len(data)) if ret[i,0]>1e-5)
    
    def getvalue(X):
        XX = np.append(X,[-1])
        return sum(ret[i,0]*data[i,-1]*K(XX,data[i]) for i in range(len(data))) + b
    
    ks = [(i,j,i**2,j**2) for j in range(10) for i in range(10)]
    vs = [getvalue(k[:2]) for k in ks]
    
    lr = lm.LinearRegression()
    lr.fit(ks,vs)
    print ret
    print lr.coef_*9,lr.intercept_*9
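Note: sum_entries and mul_elemwise belong to the pre-1.0 cvxpy API; in cvxpy >= 1.0 they were replaced by cvxpy.sum and cvxpy.multiply. Below is a minimal, hedged sketch (not part of the original example) of the same kernelized dual QP written against the modern API, assuming the same data layout as q3() above (features in the leading columns, the +/-1 label in the last column).

import numpy as np
import cvxpy as cp

def dual_svm_modern(data):
    # Assumed layout: data[:, :-1] are features, data[:, -1] are +/-1 labels.
    X, y = data[:, :-1], data[:, -1]
    K = (1.0 + X @ X.T) ** 2                  # polynomial kernel matrix K(x_i, x_j)
    Q = np.outer(y, y) * K                    # Q_ij = y_i * y_j * K(x_i, x_j)
    # Q is PSD in exact arithmetic (diag(y) K diag(y)); add a tiny ridge to Q
    # if quad_form complains about small negative numerical eigenvalues.
    alpha = cp.Variable(len(data))
    objective = cp.Minimize(0.5 * cp.quad_form(alpha, Q) - cp.sum(alpha))
    constraints = [cp.sum(cp.multiply(y, alpha)) == 0, alpha >= 0]
    cp.Problem(objective, constraints).solve()
    return alpha.value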
Example n. 2
def create(**kwargs):
    # m>k
    k = kwargs['k']  #class
    m = kwargs['m']  #instance
    n = kwargs['n']  #dim
    p = 5   #p-largest
    q = 10
    X = problem_util.normalized_data_matrix(m,n,1)
    Y = np.random.randint(0, k-1, (q,m))

    Theta = cp.Variable(n,k)
    t = cp.Variable(q)
    texp = cp.Variable(m)
    f = cp.sum_largest(t, p)+cp.sum_entries(texp) + cp.sum_squares(Theta)
    C = []
    C.append(cp.log_sum_exp(X*Theta, axis=1) <= texp)
    for i in range(q):
        Yi = one_hot(Y[i], k)
        C.append(-cp.sum_entries(cp.mul_elemwise(X.T.dot(Yi), Theta)) == t[i])

    t_eval = lambda: np.array([
        -cp.sum_entries(cp.mul_elemwise(X.T.dot(one_hot(Y[i], k)), Theta)).value for i in range(q)])
    f_eval = lambda: cp.sum_largest(t_eval(), p).value \
        + cp.sum_entries(cp.log_sum_exp(X*Theta, axis=1)).value \
        + cp.sum_squares(Theta).value
    
    return cp.Problem(cp.Minimize(f), C), f_eval
Example n. 3
def create(m, n):
    mu = 1
    rho = 1
    sigma = 0.1

    A = problem_util.normalized_data_matrix(m, n, mu)
    x0 = sp.rand(n, 1, rho)
    x0.data = np.random.randn(x0.nnz)
    x0 = x0.toarray().ravel()

    b = np.sign(A.dot(x0) + sigma*np.random.randn(m))
    A[b>0,:] += 0.7*np.tile([x0], (np.sum(b>0),1))
    A[b<0,:] -= 0.7*np.tile([x0], (np.sum(b<0),1))

    P = la.block_diag(np.random.randn(n-1,n-1), 0)

    lam = 1
    x = cp.Variable(A.shape[1])

    # Straightforward formulation w/ no constraints
    # TODO(mwytock): Fix compiler so this works
    z0 = 1 - sp.diags([b],[0])*A*x + cp.norm1(P.T*x)
    f_eval = lambda: (lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z0, 0))).value

    # Explicit epigraph constraint
    t = cp.Variable(1)
    z = 1 - sp.diags([b],[0])*A*x + t
    f = lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z, 0))
    C = [cp.norm1(P.T*x) <= t]
    return cp.Problem(cp.Minimize(f), C), f_eval
Example n. 4
def create(m, n):
    # Generate random points uniform over hypersphere
    A = np.random.randn(m, n)
    A /= np.sqrt(np.sum(A**2, axis=1))[:,np.newaxis]
    A *= (np.random.rand(m)**(1./n))[:,np.newaxis]

    # Shift points and add some outliers
    # NOTE(mwytock): causes problems for operator splitting, should fix
    #x0 = np.random.randn(n)
    x0 = np.zeros(n)
    A += x0

    k = max(m/50, 1)
    idx = np.random.randint(0, m, k)
    A[idx, :] += np.random.randn(k, n)
    lam = 1
    x = cp.Variable(n)
    rho = cp.Variable(1)

    # Straightforward expression
    #z = np.sum(A**2, axis=1) - 2*A*x + cp.sum_squares(x)  # z_i = ||a_i - x||^2
    #f = cp.sum_entries(cp.max_elemwise(z - rho, 0)) + lam*cp.max_elemwise(0, rho)

    # Explicit epigraph form
    t = cp.Variable(1)
    z = np.sum(A**2, axis=1) - 2*A*x + t  # z_i = ||a_i - x||^2
    z0 = np.sum(A**2, axis=1) - 2*A*x + cp.sum_squares(x)
    f_eval = lambda: ((1./n)*cp.sum_entries(cp.max_elemwise(z0 - rho, 0)) + cp.max_elemwise(0, rho)).value
    f = (1./n)*cp.sum_entries(cp.max_elemwise(z-rho, 0)) + lam*cp.sum_entries(cp.max_elemwise(rho, 0))
    C = [cp.sum_squares(x) <= t]

    return cp.Problem(cp.Minimize(f), C), f_eval
Example n. 5
def water_filling(n,a,sum_x=1):
  '''
Boyd and Vandenberghe, Convex Optimization, example 5.2 page 145
Water-filling.
  
This problem arises in information theory, in allocating power to a set of
n communication channels in order to maximise the total channel capacity.
The variable x_i represents the transmitter power allocated to the ith channel, 
and log(α_i+x_i) gives the capacity or maximum communication rate of the channel. 
The objective is to minimize  -∑log(α_i+x_i) subject to the constraint ∑x_i = 1 
  '''
  # Declare variables and parameters
  x = cvx.Variable(n)
  alpha = cvx.Parameter(n,sign='positive')
  alpha.value = a
  #alpha.value = np.ones(n)
  # Choose objective function. Interpret as maximising the total communication rate of all the channels
  obj = cvx.Maximize(cvx.sum_entries(cvx.log(alpha + x)))
  # Declare constraints
  constraints = [x >= 0, cvx.sum_entries(x) - sum_x == 0]
  # Solve
  prob = cvx.Problem(obj, constraints)
  prob.solve()
  if(prob.status=='optimal'):
    return prob.status,prob.value,x.value
  else:
    return prob.status,np.nan,np.nan
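A minimal, hedged usage sketch for water_filling above (the channel parameters a are made up; assumes numpy as np and cvxpy imported as cvx, as the function body implies):

import numpy as np

a = np.array([0.8, 1.0, 1.2])                       # assumed channel parameters alpha_i
status, optval, x_opt = water_filling(3, a)
print('status: {}'.format(status))
print('optimal total capacity: {:.4f}'.format(optval))
print('power allocation: {}'.format(np.asarray(x_opt).ravel()))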
Example n. 6
def find_ideal_pt_for_person_in_ball(center, radius, idealpt_and_radius, constraint="l1"):
    X = cvxpy.Variable(5)  # 1 point for each item
    fun = 0
    y = idealpt_and_radius[0]
    w = idealpt_and_radius[1]
    sumsq = math.sqrt(sum([math.pow(w[i], 2) for i in range(5)]))
    w = [w[i] / sumsq for i in range(5)]

    for slider in range(5):
        fun += w[slider] * cvxpy.abs(X[slider] - y[slider])
    obj = cvxpy.Minimize(fun)
    constraints = [X >= 0, X[0] + X[1] + X[2] - X[3] + 162 == X[4]]

    if constraint == "l1":
        constraints += [cvxpy.sum_entries(
            cvxpy.abs(X[0:4] - center[0:4])) <= radius]
    else:
        constraints += [cvxpy.sum_entries(
            cvxpy.square(X[0:4] - center[0:4])) <= radius**2]

    prob = cvxpy.Problem(obj, constraints)
    result = prob.solve()
    items = [X.value[i, 0] for i in range(5)]

    if constraint == "l1":
        credits = [abs(items[i] - center[i]) / radius for i in range(4)]
    else:
        credits = [(items[i] - center[i])**2 / radius**2 for i in range(4)]

    deficit = calculate_deficit(items)
    items.append(deficit)
    return items, credits
Example n. 7
def main():
    x = [[1,0],[0,1],[0,-1],[-1,0],[0,2],[0,-2],[-2,0]]
    y = [-1, -1, -1, 1, 1, 1, 1]
    Q = np.empty((7,7))
    for i in range(0,7):
        for j in range(0,7):
            Q[i,j] = y[i]*y[j]*K(x[i],x[j])

    p = np.array([-1] * 7)
    alpha = cp.Variable(7)
    objective = cp.Minimize(0.5 * cp.quad_form(alpha, Q) + cp.sum_entries(cp.mul_elemwise(p, alpha)))
    constraints = [cp.sum_entries(cp.mul_elemwise(y,alpha)) == 0]
    for i in range(0,7):
        constraints.append(alpha[i]>=0)
    prob = cp.Problem(objective, constraints)
    prob.solve()
    z = [[1,math.sqrt(2)*x1, math.sqrt(2)*x2, x1 ** 2, x2 ** 2] for (x1,x2) in x]
    w = [0] * 5
    for i in range(0,7):
        w += np.dot(float(alpha.value[i]) * y[i], z[i])
    print "w=",sum(w)
    #choose any support vector (z,y) to calculate the b
    print "b=",y[1] - np.dot(w, z[1])
    print "status:", prob.status
    print "optimal value", prob.value
    print alpha.value
    print sum(alpha.value)
Example n. 8
def softmax_loss(Theta, X, y):
    m = len(y)
    n, k = Theta.size
    Y = sp.coo_matrix((np.ones(m), (np.arange(m), y)), shape=(m, k))
    print cp.__file__
    return (cp.sum_entries(cp.log_sum_exp(X*Theta, axis=1)) -
            cp.sum_entries(cp.mul_elemwise(Y, X*Theta)))
Example n. 9
def main():
    TARGET = 0
    y = []
    x = []
    for line in open("features.train"):
        vec = line.strip().split()
        x.append([float(vec[1]),float(vec[2])])
        if int(float(vec[0])) == TARGET:
            y.append(1)
        else:
            y.append(-1)
    Q = np.empty((len(x),len(x)))
    p = [-1] * len(x)
    for i in range(0,len(x)):
        for j in range(0,len(x)):
            Q[i,j] = y[i]*y[j] * K(x[i],x[j])

    alpha = cp.Variable(len(x))
    C = 0.01
    objective = cp.Minimize(0.5 * cp.quad_form(alpha,Q) + cp.sum_entries(cp.mul_elemwise(p,alpha)))
    constraints = [cp.sum_entries(cp.mul_elemwise(y,alpha)) == 0]
    for i in range(0,len(x)):
        constraints.append(alpha[i]>=0)
    
    prob = cp.Problem(objective, constraints)
    prob.solve()
    print "sum(alpha)=",sum(alpha.value)
    #choose any support vector (z,y) to calculate the b
    print "status:", prob.status
    print "optimal value", prob.value
Example n. 10
 def relax(self):
     """Convex relaxation.
     """
     constr = super(GroupAssign, self).relax()
     return constr + [
         cvx.sum_entries(self, axis=1) == 1,
         cvx.sum_entries(self, axis=0) == self.col_sum[np.newaxis, :]
     ]
def cvx_mix(data_file = working_dir + 'source.csv', target_file = working_dir + 'target.csv', result_file_prefix = working_dir + 'results_', dec_pl = 3, distances = True, norm = 2, solver='ECOS', verbose = False):
    
    p = norm 
    dec_places = '%.' + str(dec_pl) + 'f'
    results_table = []

    source_df = pd.read_csv(data_file)    
    target_df = pd.read_csv(target_file)
    
    # A will be the transpose of the matrix given by the source populations; pop_names is the names of the sources; 
    A = np.transpose(source_df.values[:,1:])
    pop_names = [r[0] for r in source_df.values]
    num_src_pops = A.shape[1]
    
    x = cvx.Variable(A.shape[1])
    
    # T is the matrix given by the target populations.
    T = target_df.values[:,1:] 
     
    # For the transpose of each row b of T we minimize || Ax - b ||_p  (using the L_p norm; this defaults to Euclidean: p = 2),
    # subject to x > 0 and || x ||_1 = 1
    
    for tr in range(T.shape[0]):
        
        b = T[tr,:]

        objective = cvx.Minimize(cvx.sum_entries(cvx.power((A*x - b),p)))
        constraints = [0 <= x, x <= 1, cvx.sum_entries(x) == 1]
    
        prob = cvx.Problem(objective, constraints)
    
        min_dist_to_p = prob.solve(solver=solver, verbose=verbose)
        
        # work around for floating point errors for tiny distances giving negative sum of squares
        
        if min_dist_to_p >= 0:
            pass
        else:
            min_dist_to_p = cvx.sum_entries(cvx.power((A*x - b),p)).value
        
        min_dist = np.power(min_dist_to_p, 1.0/p)  # 1.0/p guards against integer division under Python 2
        
        y = 100 * x
        
        # our list comprehension should really be over x.value.shape[0], but this is always just equal to A.shape[1] = len(pop_names) = num_src_pops
        
        results_table.append([target_df.values[tr][0]] + [dec_places % (y.value[i][0,0]) for i in range(num_src_pops)] + ['%.6f' % min_dist])
               
    # write results_file
    result_file_suffix = time.strftime("%d-%m-%y_%H-%M-%S", time.gmtime()) 
    headers = ["Population"] + pop_names + ["Min dist^2"]
    df = pd.DataFrame(results_table, columns=headers)
    if distances == False:
        df = df.transpose()[:-1].transpose()
    df.to_csv(result_file_prefix + result_file_suffix + '.csv')
    
    return
Example n. 12
File: nd.py Project: Daiver/jff
def runndCVXPy(A, b, clambda):
    n_vars = A.shape[1]
    x = cvxpy.Variable(n_vars)
    obj = cvxpy.Minimize(
            clambda * cvxpy.sum_entries(cvxpy.abs(x)) +
            0.5     * cvxpy.sum_entries(cvxpy.square(A * x - b)))
    prob = cvxpy.Problem(obj, [])
    prob.solve(verbose=False)
    return x.value
Example n. 13
def kliep_learning(phi_te, phi_tr):
    """
    solve the convex optimization problem using cvxpy
    phi_te is kernel of test samples
    phi_tr is kernel of training samples
    """
    x = cvx.Variable(phi_te.shape[1])
    objective = cvx.Maximize(cvx.sum_entries(cvx.log(phi_te*x)))
    constraints = [cvx.sum_entries(phi_tr*x) == phi_tr.shape[0], x>=0]
    prob = cvx.Problem(objective, constraints)
    prob.solve()
    return prob, x
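A hedged usage sketch for kliep_learning above, with random non-negative basis matrices standing in for the real test/training kernels:

import numpy as np

np.random.seed(0)
phi_te = np.abs(np.random.randn(200, 20))   # test-sample kernel, 20 basis functions (assumed)
phi_tr = np.abs(np.random.randn(300, 20))   # training-sample kernel
prob, x = kliep_learning(phi_te, phi_tr)
print(prob.status, prob.value)
print(np.asarray(x.value).ravel()[:5])      # first few importance-weight coefficients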
Example n. 14
def feasibility_regression(X, pairwise_constraints_indices, 
                      bag_indices,upper_p_bound_bags,
                      diff_upper_bound_pairs,diff_lower_bound_pairs,
                      lower_p_bound_bags):

    theta = cp.Variable(X.shape[1])
    reg = cp.square(cp.norm(theta, 2))
    
    constraints = []
    added_pairs = []
    pair_ind = 0
    for pair in pairwise_constraints_indices:
        bag_high = bag_indices[pair[0]]
        bag_low = bag_indices[pair[1]]
      
        scores_high = (1./len(bag_high))*X[bag_high]*theta
        scores_low = (1./len(bag_low))*X[bag_low]*theta
    
        if pair in diff_upper_bound_pairs:
            constraints.append(cp.sum_entries(scores_high) - cp.sum_entries(scores_low) < diff_upper_bound_pairs[pair])
            
        if pair in diff_lower_bound_pairs:
            constraints.append(cp.sum_entries(scores_high) - cp.sum_entries(scores_low) > diff_lower_bound_pairs[pair])
        else:
            constraints.append(cp.sum_entries(scores_high) - cp.sum_entries(scores_low) > 0)
    
        if pair[0] not in added_pairs:
            if pair[0] in upper_p_bound_bags:
                constraints.append(cp.sum_entries(scores_high)<=upper_p_bound_bags[pair[0]])
            if pair[0] in lower_p_bound_bags:
                constraints.append(cp.sum_entries(scores_high)>=lower_p_bound_bags[pair[0]])
            added_pairs.append(pair[0])
        if pair[1] not in added_pairs:
            if pair[1] in upper_p_bound_bags:
                constraints.append(cp.sum_entries(scores_low)<=upper_p_bound_bags[pair[1]])
            if pair[1] in lower_p_bound_bags:
                constraints.append(cp.sum_entries(scores_low)>=lower_p_bound_bags[pair[1]])
            added_pairs.append(pair[1])
        pair_ind+=1
    
    prob = cp.Problem(cp.Minimize(1*reg),constraints = constraints)

    try:
        prob.solve()
    except:
        prob.solve(solver="SCS")
    w_t = np.squeeze(np.asarray(np.copy(theta.value)))
    return w_t        
Example n. 15
def quantileReg(t,y,alpha):
 
    # Regression data
    m=y.size
    n=2
    
 
    A=np.zeros((m,n))
    b=np.zeros((m,1))
    
    A[:,0]=t
    A[:,1]=np.ones((m,))
    
    b[:,0]=y

    # Variables
    x = cp.Variable(n)
    ep = cp.Variable(m)
    em = cp.Variable(m)
    
    # Set up the problem
    objective = cp.Minimize(cp.sum_entries(cp.square(alpha*ep +(1.-alpha)*em)))
    constraints = [A*x-b+em-ep==0,ep>=0,em>=0]
    prob = cp.Problem(objective, constraints)
    
    # Solve
    prob.solve()    
    xres=np.array(x.value)  
    
    return xres
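A hedged usage sketch for quantileReg above, fitting a line to noisy data with alpha = 0.9 (the asymmetric weighting of the positive/negative residuals); the data here are made up:

import numpy as np

np.random.seed(0)
t = np.linspace(0.0, 10.0, 200)
y = 2.0 * t + 1.0 + np.random.randn(200)
coeffs = quantileReg(t, y, 0.9)
print(coeffs.ravel())    # approximately [slope, intercept]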
Example n. 16
def solve_problem(Q, c, verbose=False):
    u = cvx.Variable(c.size)
    obj = cvx.Minimize(cvx.quad_form(u, Q)+cvx.sum_entries(c.T*u))
    prob = cvx.Problem(obj)
    prob.solve(verbose=verbose, solver=cvx.SCS)
    uval = np.array(u.value).ravel()
    return np.reshape(uval, (c.size // DIM_SIZE, DIM_SIZE))
Example n. 17
def create(**kwargs):
    n = kwargs["n"]

    x = cp.Variable(n)
    u = np.random.rand(n)
    f =  cp.sum_squares(x-u)+cp.sum_entries(cp.max_elemwise(x, 0))
    return cp.Problem(cp.Minimize(f))
Example n. 18
def blockCompressedSenseHuber(block, rho, alpha, basis_premultiplied, mixing_matrix):
    """ Run reversed Huber compressed sensing given alpha and a basis."""

    # Get block size.
    block_len = block.shape[0] * block.shape[1]

    # Unravel this image into a single column vector.
    img_vector = np.asmatrix(block.ravel()).T

    # Determine m (samples)
    img_measured = mixing_matrix * img_vector

    # Construct the problem.
    coefficients = cvx.Variable(block_len)
    coefficients_premultiplied = basis_premultiplied * coefficients

    huber_penalty = reversed_huber(rho * coefficients / np.sqrt(alpha))

    L2 = cvx.sum_squares(coefficients_premultiplied - img_measured)
    RH = cvx.sum_entries(huber_penalty)
    objective = cvx.Minimize(L2 + 2 * alpha * RH)
    constraints = []
    problem = cvx.Problem(objective, constraints)

    # Solve.
    problem.solve(verbose=False, solver="SCS")

    # Print problem status.
    print "Problem status: " + str(problem.status)
    sys.stdout.flush()

    return coefficients.value
Example n. 19
    def updateEi2(self,sigmai,ei_):
        sigmai=np.array(sigmai)
        ei_=np.array(ei_)
        ei=cvx.Variable(I)
        si=cvx.Variable(I)
        constraints=[0<=si]
        slacost=cvx.sum_entries(cvx.mul_elemwise(self.lamb,cvx.inv_pos(self.mu-cvx.mul_elemwise(self.lamb,cvx.inv_pos(self.S-si)))))
        watcost=np.array(self.omegawat)*si
        for i in range(I):
            constraints+=[ei[i]==self.gamma[i]*si[i]]
        objective = cvx.Minimize(-np.transpose(sigmai)*ei+self.omegadc*(slacost+watcost)+rho/2*cvx.sum_squares(ei-np.array(ei_)))
        prob = cvx.Problem(objective, constraints)
        result = prob.solve(solver=slv)

        #print ei.value.A1
        #print result
        self.e=ei.value.A1
        mins=self.minServer()
        maxe=np.zeros(I)
        for i in range(I):
            maxe[i]=(self.S[i]-mins[i])*self.gamma[i]
            if self.e[i]<0:
                self.e[i] = 0  # clip negative allocations to zero
            else:
                if self.e[i]>maxe[i]:
                    self.e[i]=maxe[i]
                    print('\nWarning: reducing si lower than minimum active server!!!!')
        #print("ei:",self.e)

        self.setSwitchSever(self.e)
Example n. 20
 def constraints(self):
     idx = slice(self.time_start, self.time_end)
     return [
         cvx.sum_entries(self.terminals[0].power_var[idx]) >= self.energy,
         self.terminals[0].power_var >= 0,
         self.terminals[0].power_var <= self.power_max,
     ]
def solve_s(xi, x_0, N, solver=None):
    s = cvx.Variable(N)
    obj = cvx.Maximize(cvx.sum_entries(cvx.log(x_0 + xi.T * s)))
    constr = [0 <= s]
    prob = cvx.Problem(obj, constr)
    prob.solve(solver=solver)
    return np.array(s.value).T[0]
Example n. 22
def main():
    TARGET = 0
    y = []
    x = []
    for line in open("features.train"):
        vec = line.strip().split()
        x.append([float(vec[1]),float(vec[2])])
        if int(float(vec[0])) == TARGET:
            y.append(1)
        else:
            y.append(-1)

    w = cp.Variable(2)
    b = cp.Variable()
    Q = np.identity(2)
    xi = cp.Variable(len(x))
    C = 0.01
    objective = cp.Minimize(0.5 * cp.quad_form(w,Q) + C * cp.sum_entries(xi))
    constraints = []
    for i in range(0,len(x)):
        constraints.append(y[i]*(x[i][0]*w[0] + x[i][1]*w[1] + b) >= 1 - xi[i])  # margin constraint y_i*(w'x_i + b) >= 1 - xi_i
        constraints.append(xi[i]>=0)
    
    prob = cp.Problem(objective, constraints)
    prob.solve()
    print "w=",sum([ item**2 for item in w.value])
    #choose any support vector (z,y) to calculate the b
    print "b=",y[1] - np.dot(w.value, x[1])
    print "status:", prob.status
    print "optimal value", prob.value
Example n. 23
def solve_sdp(G):
    """ Solves the SDP problem:
    
    maximize    1' * s
    subject to  0 <= s <= 1
                [G, G - diag(s); G - diag(s), G] >= 0
    stolen directly from knockoff R package - solve_sdp.py
    """
    assert G.ndim == 2 and G.shape[0] == G.shape[1]
    p = G.shape[0]
    
    # Define the problem.
    s = cvx.Variable(p)
    objective = cvx.Maximize(cvx.sum_entries(s))
    constraints = [
        0 <= s, s <= 1,
        2*G - cvx.diag(s) == cvx.Semidef(p),
    ]
    prob = cvx.Problem(objective, constraints)
    
    # Solve the problem.
    prob.solve()
    assert prob.status == cvx.OPTIMAL
    
    # Return as array, not as matrix.
    return np.asarray(s.value).flatten()
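A hedged usage sketch for solve_sdp above, on an equicorrelated correlation matrix; for this G the knockoff SDP optimum is min(1, 2*(1 - rho)) in every coordinate:

import numpy as np

p, rho = 10, 0.3
G = (1.0 - rho) * np.eye(p) + rho * np.ones((p, p))
s = solve_sdp(G)
print(s)    # expected: all entries close to min(1, 2*(1 - rho)) = 1.0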
def optimal_power(n, a_val, b_val, P_tot=1.0, W_tot=1.0):
  '''
Boyd and Vandenberghe, Convex Optimization, exercise 4.62 page 210
Optimal power and bandwidth allocation in a Gaussian broadcast channel.

We consider a communication system in which a central node transmits messages
to n receivers. Each receiver channel is characterized by its (transmit) power
level Pi ≥ 0 and its bandwidth Wi ≥ 0. The power and bandwidth of a receiver
channel determine its bit rate Ri (the rate at which information can be sent)
via
   Ri=αiWi log(1 + βiPi/Wi),
where αi and βi are known positive constants. For Wi=0, we take Ri=0 (which
is what you get if you take the limit as Wi → 0).  The powers must satisfy a
total power constraint, which has the form
P1 + · · · + Pn = Ptot,
where Ptot > 0 is a given total power available to allocate among the channels.
Similarly, the bandwidths must satisfy
W1 + · · · +Wn = Wtot,
where Wtot > 0 is the (given) total available bandwidth. The optimization
variables in this problem are the powers and bandwidths, i.e.,
P1, . . . , Pn, W1, . . . ,Wn.
The objective is to maximize the total utility, sum(ui(Ri),i=1..n)
where ui: R → R is the utility function associated with the ith receiver.
  '''
  # Input parameters: alpha and beta are constants from R_i equation
  n=len(a_val)
  if n!=len(b_val):
    print('alpha and beta vectors must have same length!')
    return 'failed',np.nan,np.nan,np.nan
  P=cvx.Variable(n)
  W=cvx.Variable(n)
  alpha=cvx.Parameter(n)
  beta =cvx.Parameter(n)
  alpha.value=np.array(a_val)
  beta.value =np.array(b_val)
  # This function will be used as the objective so must be DCP; i.e. element-wise multiplication must occur inside kl_div, not outside otherwise the solver does not know if it is DCP...
  R=cvx.kl_div(cvx.mul_elemwise(alpha, W),
               cvx.mul_elemwise(alpha, W + cvx.mul_elemwise(beta, P))) - \
    cvx.mul_elemwise(alpha, cvx.mul_elemwise(beta, P))
  objective=cvx.Minimize(cvx.sum_entries(R))
  constraints=[P>=0.0,
               W>=0.0,
               cvx.sum_entries(P)-P_tot==0.0,
               cvx.sum_entries(W)-W_tot==0.0]
  prob=cvx.Problem(objective, constraints)
  prob.solve()
  return prob.status,-prob.value,P.value,W.value
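A hedged usage sketch for optimal_power above, splitting unit power and unit bandwidth across four channels with made-up alpha/beta constants:

import numpy as np

a_val = [1.0, 1.0, 2.0, 2.0]     # alpha_i (assumed)
b_val = [1.0, 2.0, 1.0, 2.0]     # beta_i (assumed)
status, utility, P_opt, W_opt = optimal_power(4, a_val, b_val, P_tot=1.0, W_tot=1.0)
print(status, utility)
print(np.asarray(P_opt).ravel())
print(np.asarray(W_opt).ravel())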
    def optimize_pos_basic(self, df):

        w_target = df['pos']/df['pos'].abs().sum()
        w_target = w_target.values
        
        nassets=df.shape[0]
        w = cvxpy.Variable(nassets)
        
        obj = cvxpy.Minimize(cvxpy.norm(w-w_target))
        constraints = [cvxpy.sum_entries(cvxpy.abs(w)) == 1.0]
        constraints += [cvxpy.abs(cvxpy.sum_entries(w)) <= 0.1]
        constraints += [cvxpy.abs(w) <= 1.0/nassets]
        #constraints += [w >= 0]
        prob = cvxpy.Problem(obj, constraints)
        prob.solve(verbose=False, method='dccp')
        
        return np.reshape(w.value, [nassets])
Example n. 26
def refine_als_p21(P_21, O_init, iters=10):
    n = np.shape(P_21)[0]
    m = np.shape(O_init)[1]

    O_1 = O_init
    O_2 = O_init
    H_21 = row_col_normalize_l1(np.random.rand(m,m))
    residual = np.zeros(iters)

    for it in range(iters):
        if it % 3 == 1:
            O_1 = cvx.Variable(n, m)
            constraint = [O_1 >= 0, cvx.sum_entries(O_1, axis=0) == 1]
        elif it % 3 == 2:
            O_2 = cvx.Variable(n, m)
            constraint = [O_2 >= 0, cvx.sum_entries(O_2, axis=0) == 1]
        else:
            H_21 = cvx.Variable(m, m)
            constraint = [H_21 >= 0, cvx.sum_entries(H_21) == 1]
            O_avg = (O_1 + O_2) / 2
            O_1 = O_avg
            O_2 = O_avg


        obj = cvx.Minimize(cvx.norm(P_21 - O_1 * H_21 * O_2.T))
        prob = cvx.Problem(obj, constraint)
        prob.solve()

        if prob.status != cvx.OPTIMAL:
            raise Exception("Solver did not converge!")

        print 'Iteration {}, residual norm {}'.format(it, prob.value)
        residual[it] = prob.value

        if it % 3 == 1:
            O_1 = O_1.value
        elif it % 3 == 2:
            O_2 = O_2.value
        else:
            H_21 = H_21.value

    H_21 = np.asarray(H_21)
    pi = np.sum(H_21, axis=0)
    T = normalize_m(H_21)

    return  np.asarray(O_avg), np.asarray(T), np.asarray(pi)
Example n. 27
        def compute(self, today, assets,out,returns):
            print("------------------------------- Markowitz:",today)
            #print ("Markowitz factor:",returns)
            gamma = cvx.Parameter(sign="positive")
            gamma.value = 1  # gamma is a Parameter that trades off riskmanager and return.
            #returns = np.nan_to_num(returns.T)  # time,stock to stock,time
            #print ("Markowitz factor2:",returns)
            #cov_mat = np.cov(returns)
            #Sigma = cov_mat
            Sigma = np.nan_to_num(returns)
            Sigma = Sigma.T.dot(Sigma)
            #print ("Markowitz factor2:",Sigma)
            ########################################################
            w = cvx.Variable(len(assets))
            risk = cvx.quad_form(w, Sigma)
            mu = np.array([self.target_ret] * len(assets))
            expected_return = np.reshape(mu,(-1, 1)).T * w  # w is a vector of stock holdings as fractions of total assets.
            objective = cvx.Maximize(expected_return - gamma * risk)  # Maximize(expected_return - expected_variance)
            #########################################################


            constraints = [cvx.sum_entries(w) == 0]  # dollar-neutral long/short

            sector_dist = {}
            idx = 0
            class_nos = get_sectors_no(assets)
            #print ("Markowitz class_nos:",class_nos)
            for classid in class_nos:
                if classid not in sector_dist:
                    _ = []
                    sector_dist[classid] = _
                sector_dist[classid].append(idx)
                idx += 1
            #print("sector size :", len(sector_dist),idx,sector_dist)
            for k, v in sector_dist.items():
                constraints.append(cvx.sum_entries(w[v]) <  (1.0 / len(sector_dist) + self.max_sector_exposure))
                constraints.append(cvx.sum_entries(w[v]) >= (1.0 / len(sector_dist) - self.max_sector_exposure))

            prob = cvx.Problem(objective, constraints)
            prob.solve()
            if prob.status != 'optimal':
                print ("Optimal failed %s , do nothing" % prob.status)
                return None
            #print ("Markowit weight",np.squeeze(np.asarray(w.value)))  # Remo
            out[:] = np.squeeze(np.asarray(w.value))  # column 1 of each row
Example n. 28
def determine_optimal_contract(policy, problem_specs):
    num_states = problem_specs[NUM_STATES]
    # trans_matrix is a num_states x num_states x num_actions size array
    trans_prob = problem_specs[TRANS_MATRIX].for_policy(policy)
    initial_prob = problem_specs[INITIAL_PROB]
    # rewards is a num_states x num_states size matrix
    rewards = problem_specs[REWARDS]
    # trans_matrix is a num_states x num_states x num_actions size array
    costs = problem_specs[COSTS].for_policy(policy)
    # agent participation constraint
    part_const = problem_specs[PART_CONST]
    # maximum contracts vector
    max_contract = problem_specs[MAX_CONTRACT]
    max_cont = max_contract[0]
    initial_prob = np.hstack((initial_prob, initial_prob))

    ## Construct the objective
    contracts = cvx.Variable(num_states, 1)
    contracts_matrix = contracts
    # equivalent to repmat as in matlab
    for i in range(num_states - 1):
        contracts_matrix = cvx.hstack(contracts_matrix, contracts)
    # contruct the principals reward objective function
    principals_reward = rewards - contracts_matrix
    principals_reward = cvx.mul_elemwise(trans_prob, principals_reward)
    # at this step, we have the expected value of the principals reward among ending states
    principals_reward = cvx.mul_elemwise(initial_prob, principals_reward)
    # now, we have the expected value of the principal's reward among both initial and ending states
    principals_reward = cvx.sum_entries(principals_reward)
    # the objective is to maximize the principal's reward
    objective = cvx.Maximize(principals_reward)

    ## Construct the constraints
    # First, we need to make sure the contracts we use don't surpass the maximum allowed contracts in value
    constraints = [contracts < max_contract]
    # Next, construct the agent's participation constraint
    agents_reward = contracts_matrix - costs
    agents_reward = cvx.mul_elemwise(trans_prob, agents_reward)
    agents_reward = cvx.mul_elemwise(initial_prob, agents_reward)
    agents_reward = cvx.sum_entries(agents_reward)
    constraints.append(agents_reward > part_const)

    problem = cvx.Problem(objective, constraints)
    problem.solve()
    return problem
Example n. 29
def f_quantile_elemwise():
    m = 4
    k = 2
    alphas = rand(k)
    A = np.tile(alphas, (m, 1))
    X = cp.Variable(m, k)
    return cp.sum_entries(cp.max_elemwise(
        cp.mul_elemwise( -A, X),
        cp.mul_elemwise(1-A, X)))
Example n. 30
def get_constr_error(constr):
    if isinstance(constr, cvx.constraints.EqConstraint):
        error = cvx.abs(constr.args[0] - constr.args[1])
    elif isinstance(constr, cvx.constraints.LeqConstraint):
        error = cvx.pos(constr.args[0] - constr.args[1])
    elif isinstance(constr, cvx.constraints.PSDConstraint):
        mat = constr.args[0] - constr.args[1]
        error = cvx.neg(cvx.lambda_min(mat + mat.T)/2)
    return cvx.sum_entries(error)
Example n. 32
def cvx_power_alloc_unequal_power(W, Pa, nt, nc, nac): 
    Pc = Pa*np.ones((nt,1))
    d = cvx.Variable(nc*nac) 
    objective = cvx.Maximize(cvx.sum_entries(cvx.log1p(d)/np.log(2))) 
    constraints = [W @ d <= Pc, d >= 0] 
    prob = cvx.Problem(objective, constraints) 
    prob.solve(solver='SCS') 
    print('Problem status: {}'.format(prob.status))
    return np.asarray(np.abs(d.value)).flatten()
 def fit(self, x, y, s=None):
     self.switch()
     self.preprocess(x)
     # construct objective function
     yz = cvx.mul_elemwise(y.values, self.x_ * self.w)
     objective = cvx.Minimize(cvx.sum_entries(self.cvx_phi(yz)))
     self.prob = cvx.Problem(objective)
     # solve the optimization problem
     self.optimize()
def lqr_qp_cp(C, c, lower, upper):
    n = c.shape[0]
    x = cp.Variable(n)
    obj = 0.5 * cp.quad_form(x, C) + cp.sum_entries(cp.mul_elemwise(c, x))
    cons = [lower <= x, x <= upper]
    prob = cp.Problem(cp.Minimize(obj), cons)
    prob.solve()
    assert 'optimal' in prob.status
    return np.array(x.value)
Example n. 35
def minimize(kernel, y, C, pi_):
    N = kernel.shape[0]

    a = cvx.Variable(N)
    obj = cvx.Minimize((1 / 2) * cvx.quad_form(a, kernel) - cvx.sum_entries(a))

    constraints = [
        a >= 0, a <= cvx.mul_elemwise(C, pi_),
        cvx.sum_entries(a.T * y) == 0
    ]

    prob = cvx.Problem(obj, constraints)

    prob.solve()

    print("status:", prob.status)

    return [x[0, 0] for x in a.value]
Example n. 36
    def train(self, x, y, gamma=0.1):

        # ensure labels are [0, 1]
        y[y == -1] = 0
        assert np.unique(y).tolist() == [0, 1]

        # regularization parameter
        g = cvx.Parameter(sign="positive")
        g.value = gamma / x.shape[1]

        # define model variables
        w = cvx.Variable(x.shape[1])
        b = cvx.Variable()

        # compute affine transform
        a = x * w - b

        # compute log-likelihood
        l = cvx.sum_entries(cvx.mul_elemwise(y, a)) - cvx.sum_entries(
            cvx.logistic(a))

        # minimize negative log-likelihood plus l-2 regularization
        obj = cvx.Minimize(-l + g * cvx.sum_squares(w))

        try:

            # form problem and solve
            prob = cvx.Problem(obj)
            prob.solve()

            # throw error if not optimal
            assert prob.status == 'optimal'

            # save model parameters
            self.w = np.array(w.value)
            self.b = np.array(b.value)

            # return success
            return True

        except:

            # return failure
            return False
Example n. 37
def clean_dictionary(phrase_file):
    lexicon = pt.getPhraseEntriesFromTable(phrase_file)
    lexicon = filter(pt.filterLex, lexicon)
    entries = list((entry['srcphrase'], entry['tgtphrase'], \
        entry['probValues'][0], entry['probValues'][1], \
        entry['probValues'][2], entry['probValues'][3]) \
        for entry in lexicon)

    # Make it completely random. Which two distributions we choose to work with
    #direction = True if np.random.random() <= 0.5 else False;
    direction = True
    if direction:
        #srctotgt
        pprobs = np.asarray([X[2] for X in entries])
        lprobs = np.asarray([X[4] for X in entries])
        vocab = set(X[0] for X in entries)
        index = 0
    else:
        #tgttosrc
        pprobs = np.asarray([X[3] for X in entries])
        lprobs = np.asarray([X[5] for X in entries])
        vocab = set(X[1] for X in entries)
        index = 1

    vocab = sorted(list(vocab))
    vocab = dict((phrase, idx) for idx, phrase in enumerate(vocab))
    groups = sparse.dok_matrix((len(vocab), len(entries)), dtype=float)
    for idx, entry in enumerate(entries):
        groups[vocab[entry[index]], idx] = 1
    groups = groups.tocsc()

    sparse_dists = convex_cleanup(pprobs, lprobs, groups)
    global_sol = None
    global_entropy = -100
    for dist in sparse_dists:
        solution = dist.value
        entropy = cvx.sum_entries(cvx.entr(solution)).value
        if entropy > global_entropy:
            global_sol = solution
        print(np.count_nonzero(solution),
              np.min(solution),
              np.max(solution),
              entropy,
              file=stderr)
        #solution = list(solution.getA1());

    global_sol = list(global_sol.getA1())
    groups = groups.todok()
    pruned_dictionary = ("%s\t%s\t%.4f" %(entries[key[1]][0], \
        entries[key[1]][1], \
        prob) \
        for key, prob in zip(sorted(groups.keys()), global_sol))

    random_utils.lines_to_file('', pruned_dictionary)

    return
Example n. 38
def get_sudoku_matrix(n):
    X = np.array([[cp.Variable(n**2) for i in range(n**2)] for j in range(n**2)])
    cons = ([x >= 0 for row in X for x in row] +
            [cp.sum_entries(x) == 1 for row in X for x in row] +
            [sum(row) == np.ones(n**2) for row in X] +
            [sum([row[i] for row in X]) == np.ones(n**2) for i in range(n**2)] +
            [sum([sum(row[i:i+n]) for row in X[j:j+n]]) == np.ones(n**2) for i in range(0,n**2,n) for j in range(0, n**2, n)])
    f = sum([cp.sum_entries(x) for row in X for x in row])
    prob = cp.Problem(cp.Minimize(f), cons)

    A = np.asarray(prob.get_problem_data(cp.ECOS)["A"].todense())
    A0 = [A[0]]
    rank = 1
    for i in range(1,A.shape[0]):
        if np.linalg.matrix_rank(A0+[A[i]], tol=1e-12) > rank:
            A0.append(A[i])
            rank += 1

    return np.array(A0)
Example n. 39
def solve(X,r):
    '''optimal q' from a sample X,r'''
    X = np.array(X); r = np.array(r);
    n,p = X.shape
    q = cvx.Variable(p)
    obj = 1.0/n * cvx.sum_entries(cvx.mul_elemwise(r,X*q)) - cvx.norm(q)**2  # 1.0/n guards against integer division
    prob = cvx.Problem(cvx.Maximize(obj))
    prob.solve()
    q = q.value.A1
    return q
Example n. 40
def prob2(X, a):
    n, m = X.shape
    
    t = cvx.Variable(m)
    prob = cvx.Problem(cvx.Minimize(0), [X*t == a, cvx.sum_entries(t) == 1, t >= 0])
    prob.solve()
    if prob.status == 'infeasible':
        return False
    else:
        return True
Example n. 41
def l1_solution(A, b, lam=0.5):
    N = A.shape[0]
    x = Variable(N)
    objective = Minimize(sum_entries(square(A * x - b)) + lam * norm(x, 1))
    constraints = []
    prob = Problem(objective, constraints)

    prob.solve()
    xhat = x.value
    return xhat
Example n. 42
def quantile_loss(alphas, Theta, X, y):
    m, n = X.shape
    k = len(alphas)
    Y = np.tile(y.flatten(), (k, 1)).T
    A = np.tile(alphas, (m, 1))
    Z = X*Theta - Y
    return cp.sum_entries(
        cp.max_elemwise(
            cp.mul_elemwise( -A, Z),
            cp.mul_elemwise(1-A, Z)))
Example n. 43
def quantile_loss(alphas, Theta, X, y):
    m, n = X.shape
    k = len(alphas)
    Y = np.tile(y, (k, 1)).T
    A = np.tile(alphas, (m, 1))
    Z = X*Theta - Y
    return cp.sum_entries(
        cp.max_elemwise(
            cp.mul_elemwise( -A, Z),
            cp.mul_elemwise(1-A, Z)))
Example n. 44
    def get_timegap_cost(self):
        """Returns the cost corresponding to timegap tracking. """
        shift = int(round(self.timegap / self.dt))
        xgapref = self.preceding_x[
                       (self.h*self.saved_h - shift)*self.nx:
                       (self.h*(self.saved_h + 1) - shift + 1)*self.nx]

        cost = cvxpy.sum_entries(cvxpy.square(self.PQ_ch_t * (self.x - xgapref)))

        return cost
Example n. 45
    def marginal_optimization(self, seed = None):
        logging.debug("Starting to merge marginals")
        # get number of cliques: n
        node_card = self.node_card; cliques = self.cliques
        d = self.nodes_num; n = self.cliques_num; m = self.clusters_num
    
        # get the junction tree matrix representation: O
        O = self.jt_rep()
        
        # get log_p is the array of numbers of sum(log(attribute's domain))
        log_p = self.log_p_func()
    
        # get log_node_card: log(C1), log(C2), ..., log(Cd)
        log_node_card = np.log(node_card)
    
        # get value of sum_log_node_card: log(C1 * C2 *...* Cd)
        sum_log_node_card = sum(log_node_card)
    
        # get the difference operator M on cluster number: m
        M = self.construct_difference()
        # initial a seed Z
        prev_Z = seed
        if prev_Z is None:
            prev_Z = np.random.rand(n,m)        
    
        # run the convex optimization for max_iter times
        logging.debug("Optimization starting...")
        for i in range(self.max_iter):
            logging.debug("The optimization iteration: "+str(i+1))
            # sum of row of prev_Z
            tmp1 = cvx.sum_entries(prev_Z, axis=0).value
        
            # tmp2 = math.log(tmp1)-1+sum_log_node_card
            tmp2 = np.log(tmp1)-1+sum_log_node_card

            # tmp3: difference of pairwise columns = prev_Z * M
            tmp3 = np.dot(prev_Z,M)
            # convex optimization
            Z = cvx.Variable(n,m)
            t = cvx.Variable(1,m)
            r = cvx.Variable()
        
            objective = cvx.Minimize(cvx.log_sum_exp(t)-self._lambda*r)
            constraints = [
                Z >= 0,
                Z*np.ones((m,1),dtype=int) == np.ones((n,1), dtype=int),
                r*np.ones((1,m*(m-1)/2), dtype=int) - 2*np.ones((1,n), dtype=int)*(cvx.mul_elemwise(tmp3, (Z*M))) + cvx.sum_entries(tmp3 * tmp3, axis=0) <= 0,
                np.ones((1,n),dtype=int)*Z >= 1,
                log_p*Z-t-np.dot(log_node_card,O)*Z+tmp2+cvx.mul_elemwise(np.power(tmp1,-1), np.ones((1,n), dtype = int)*Z) == 0
            ]
            prob = cvx.Problem(objective, constraints)
            result = prob.solve(solver='SCS',verbose=False)
            prev_Z[0:n,0:m] = Z.value

        return prev_Z, O
Example n. 46
    def get_trades(self, portfolio, t=None):
        """
        Get optimal trade vector for given portfolio at time t.

        Parameters
        ----------
        portfolio : pd.Series
            Current portfolio vector.
        t : pd.timestamp
            Timestamp for the optimization.
        """

        if t is None:
            t = pd.datetime.today()

        value = sum(portfolio)
        w = portfolio / value
        z = cvx.Variable(w.size)  # TODO pass index
        wplus = w.values + z

        if isinstance(self.return_forecast, BaseReturnsModel):
            alpha_term = self.return_forecast.weight_expr(t, wplus)
        else:
            alpha_term = cvx.sum(cvx.multiply(
                time_locator(self.return_forecast, t, as_numpy=True), wplus))

        assert(alpha_term.is_concave())

        costs, constraints = [], []

        for cost in self.costs:
            cost_expr, const_expr = cost.weight_expr(t, wplus, z, value)
            costs.append(cost_expr)
            constraints += const_expr

        constraints += [item for item in (con.weight_expr(t, wplus, z, value)
                                          for con in self.constraints)]

        for el in costs:
            assert (el.is_convex())

        for el in constraints:
            assert (el.is_dcp())

        self.prob = cvx.Problem(
            cvx.Maximize(alpha_term - sum(costs)),
            [cvx.sum_entries(z) == 0] + constraints)

        self.prob.solve(solver=self.solver, **self.solver_opts)

        if self.prob.status in ('unbounded', 'infeasible'):
            logging.error(
                f'The problem is {self.prob.status}. Defaulting to no trades')
            return self._nulltrade(portfolio)

        return pd.Series(index=portfolio.index, data=(z.value.A1 * value))
Example n. 47
    def train(self):
        '''Optimize the asymmetric dual problem and return optimal w and b.'''
        if not self.training_instances:
            raise ValueError('Must set training instances before training')
        c = 10

        if isinstance(self.training_instances, List):
            y, X = sparsify(self.training_instances)
            y, X = np.array(y), X.toarray()
        else:
            X, y = self.training_instances.numpy()

        i_neg = np.array([
            ins[1] for ins in zip(y, X)
            if ins[0] == self.negative_classification
        ])
        i_pos = np.array([
            ins[1] for ins in zip(y, X)
            if ins[0] == self.positive_classification
        ])
        ones_col = np.ones((i_neg.shape[1], 1))
        pn = np.concatenate((i_pos, i_neg))
        pl = np.ones(i_pos.shape[0])
        nl = -np.ones(i_neg.shape[0])
        pnl = np.concatenate((pl, nl))
        xj_min = np.full_like(pn, self.xmin)
        xj_max = np.full_like(pn, self.xmax)
        ones_mat = np.ones_like(pnl)
        col_neg, row_sum = i_neg.shape[1], i_pos.shape[0] + i_neg.shape[0]

        # define cvxpy variables
        w = cvx.Variable(col_neg)
        b = cvx.Variable()
        xi0 = cvx.Variable(row_sum)
        t = cvx.Variable(row_sum)
        u = cvx.Variable(row_sum, col_neg)
        v = cvx.Variable(row_sum, col_neg)

        constraints = [
            xi0 >= 0, xi0 >= 1 - mul(pnl, (pn * w + b)) + t,
            t >= mul(self.c_f,
                     (mul(xj_max - pn, v) - mul(xj_min - pn, u)) * ones_col),
            u - v == 0.5 * (1 + pnl) * w.T, u >= 0, v >= 0
        ]
        # objective
        obj = cvx.Minimize(0.5 * (cvx.norm(w)) + c * cvx.sum_entries(xi0))
        prob = cvx.Problem(obj, constraints)

        if OPT_INSTALLED:
            prob.solve(solver='CVXOPT')
        else:
            prob.solve()

        self.weight_vector = [np.array(w.value).T][0]
        self.bias = b.value
Example n. 48
def envelope_fit(signal,
                 mu,
                 eta,
                 linear_term=False,
                 kind='upper',
                 period=None):
    '''
    Perform an envelope fit of a signal. See: https://en.wikipedia.org/wiki/Envelope_(waves)
    :param signal: The signal to be fit
    :param mu: A parameter to control the overall stiffness of the fit
    :param eta: A parameter to control the permeability of the envelope. A large value result in
    no data points outside the fitted envelope
    :param kind: 'upper' or 'lower'
    :return: An envelope signal of the same length as the input
    '''
    if kind == 'lower':
        signal *= -1
    n_samples = len(signal)
    if linear_term:
        beta = cvx.Variable()
    else:
        beta = 0.
    envelope = cvx.Variable(len(signal))
    mu = cvx.Parameter(sign='positive', value=mu)
    eta = cvx.Parameter(sign='positive', value=eta)
    cost = (cvx.sum_entries(cvx.huber(envelope - signal)) +
            mu * cvx.norm2(envelope[2:] - 2 * envelope[1:-1] + envelope[:-2]) +
            eta * cvx.norm1(cvx.max_elemwise(signal - envelope, 0)))
    objective = cvx.Minimize(cost)
    if period is not None:
        constraints = [
            envelope[:n_samples - period] == envelope[period:] + beta
        ]
        problem = cvx.Problem(objective, constraints)
    else:
        problem = cvx.Problem(objective)
    try:
        problem.solve(solver='MOSEK')
        #problem.solve()
    except Exception as e:
        print(e)
        print('Trying ECOS solver')
        problem.solve(solver='ECOS')
    if not linear_term:
        if kind == 'upper':
            return envelope.value.A1
        elif kind == 'lower':
            signal *= -1
            return -envelope.value.A1
    else:
        if kind == 'upper':
            return envelope.value.A1, beta.value
        elif kind == 'lower':
            signal *= -1
            return -envelope.value.A1, -beta.value
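A hedged usage sketch for envelope_fit above; per the try/except it attempts MOSEK first and falls back to ECOS, so no commercial solver is strictly required. The signal and the mu/eta values here are illustrative:

import numpy as np

np.random.seed(0)
t = np.linspace(0, 8 * np.pi, 1000)
signal = (1.5 + np.sin(0.1 * t)) * np.sin(t) + 0.05 * np.random.randn(1000)
upper = envelope_fit(signal, mu=10.0, eta=100.0, kind='upper')
print(upper[:5])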
Example n. 49
def gen_data_ndim(nb_datapoints, dim, savefile, rand_seed=7):
    """Sampling according to Table 1 in manuscript:
    - uniform point positioning (x) in [0,1] for each dimension
    - uniform eigenvalues in [-1,1]
    - ortho-normal basis/matrix (eigvecs) of eigenvectors
    :param nb_datapoints: how many data points to sample
    :param dim: dimensionality of SDP sub-problem to sample
    :param savefile: file to save sampled data in
    :param rand_seed: random seed, pre-set to 7
    :return: None
    """
    np.random.seed(rand_seed)
    X = cvx.Variable(dim, dim)
    data_points = []
    t_init = timer()
    t0 = timer()
    for data_pt_nb in range(1, nb_datapoints + 1):
        # ortho-normal basis/matrix (eigvecs) of eigenvectors
        eigvecs = ortho_group.rvs(dim)
        # uniform eigenvalues in [-1,1]
        eigvals = np.random.uniform(-1, 1, dim).tolist()
        # construct sampled Q from eigen-decomposition
        Q = np.matmul(np.matmul(eigvecs, np.diag(eigvals)),
                      np.transpose(eigvecs))
        # uniform point positioning (x) in [0,1] for each dimension
        x = np.random.uniform(0, 1, dim).tolist()
        # construct cvx SDP sub-problem
        obj_sdp = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(Q, X)))
        constraints = [
            cvx.lambda_min(
                cvx.hstack(cvx.vstack(1, np.array(x)),
                           cvx.vstack(cvx.vec(x).T, X))) >= 0,
            *[X[ids, ids] <= x[ids] for ids in range(0, dim)]
        ]
        prob = cvx.Problem(obj_sdp, constraints)
        # solve it using Mosek SDP solver
        prob.solve(verbose=False, solver=cvx.MOSEK)
        # store upper triangular matrix in array (row major) since it is symmetric
        Q = np.triu(Q, 1) + np.triu(Q, 0)
        # save eigendecomposition, point positioning, matrix Q and solution of SDP sub-problem
        data_points.append([
            *eigvecs.T.flatten(), *eigvals, *x, *list(Q[np.triu_indices(dim)]),
            obj_sdp.value
        ])
        # save to file and empty data_points buffer every 1000 entries
        if not data_pt_nb % 1000:
            t1 = timer()
            with open(savefile, "a") as f:
                for line in data_points:
                    f.write(",".join(str(x) for x in line) + "\n")
                data_points = []
                print(
                    str(data_pt_nb) + " " + str(dim) + "D points, time = " +
                    str(t1 - t0) + "s" + ", total = " + str(t1 - t_init) + "s")
                t0 = t1
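A hedged usage sketch for gen_data_ndim above; results are appended to the CSV in batches of 1000, and MOSEK must be installed since each sub-problem is solved with cvx.MOSEK:

# Sample 2000 random 3-dimensional SDP sub-problems and append the
# eigendecompositions, point positions, Q entries and optimal values to a CSV.
gen_data_ndim(2000, 3, "sdp_data_3d.csv")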
Example n. 50
def kernel_group_lasso(kDD,
                       kDx,
                       size_groups=None,
                       l=0.01,
                       max_iters=10000,
                       eps=1e-4,
                       gpu=False):
    """Solve Kernel Group LASSO problem via CVXPY with large scale SCS solver.

    :param kDD: Dictionary-vs-dictionary Gram matrix, positive semidefinite (d x d)
    :param kDx: Dictionary-vs-input Gram vector (d x 1)
    :param size_groups: List of size of groups
    :param l: Regularization parameter
    :param max_iters: Iteration count limit
    :param eps: Convergence tolerance
    :type kDD: np.ndarray
    :type kDx: np.ndarray
    :type num_groups: int
    :type l: float
    :type max_iters: int
    :type eps: float

    References:
        [1] `Jeni, László A., et al. "Spatio-temporal event classification using time-series kernel based structured sparsity."
        <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5098425/>`_.
    """

    assert (isinstance(kDD, np.ndarray) and kDD.ndim == 2
            and kDD.shape[0] == kDD.shape[1])  #, (kDD.shape, kDx.shape))
    assert (isinstance(kDx, np.ndarray) and kDx.ndim == 1
            and kDx.shape[0] == kDD.shape[0])  #, (kDD.shape, kDx.shape))
    if size_groups is None:
        size_groups = [1] * kDD.shape[0]
    assert (np.sum(size_groups) == len(kDx))
    assert (l >= 0)
    cumsum = np.cumsum(size_groups)
    alpha = cvxpy.Variable(kDD.shape[0])
    obj = cvxpy.Minimize( 0.5 * cvxpy.quad_form(alpha, kDD) \
                          - cvxpy.sum_entries(cvxpy.mul_elemwise(kDx, alpha)) \
                          + l * cvxpy.norm(cvxpy.vstack(*[np.sqrt(e - s) * cvxpy.norm(alpha[s:e]) for (s, e) in zip(np.concatenate([np.array([0]), cumsum[:-1]]), cumsum)]), 1)
    )

    prob = cvxpy.Problem(obj)
    prob.solve(solver='SCS',
               max_iters=max_iters,
               verbose=True,
               eps=eps,
               gpu=gpu)
    a = np.asarray(alpha.value)
    g = np.asarray([
        (1. / np.sqrt(e - s)) * np.linalg.norm(a[s:e])
        for (s, e) in zip(np.concatenate([np.array([0]), cumsum[:-1]]), cumsum)
    ])
    return a, g
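A hedged usage sketch for kernel_group_lasso above, with a random linear-kernel dictionary and a signal close to one atom (all names and sizes here are made up):

import numpy as np

np.random.seed(0)
D = np.random.randn(50, 20)                    # dictionary: 50 features x 20 atoms (assumed)
x = D[:, 3] + 0.01 * np.random.randn(50)       # signal close to atom 3
kDD = D.T.dot(D)                               # dictionary-vs-dictionary Gram matrix
kDx = D.T.dot(x)                               # dictionary-vs-input Gram vector
a, g = kernel_group_lasso(kDD, kDx, size_groups=[4] * 5, l=0.1)
print(g)                                       # per-group activation norms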
Example n. 51
def get_C_hat_transpose():
    probs = []
    net.eval()
    for batch_idx, (data,
                    target) in enumerate(train_gold_deterministic_loader):
        # we subtract 10 because we added 10 to gold so we could identify which example is gold in train_phase2
        data, target = V(data.cuda(), volatile=True),\
                       V((target - num_classes).cuda(), volatile=True)

        # forward
        output = net(data)
        pred = F.softmax(output)
        probs.extend(list(pred.data.cpu().numpy()))

    probs = np.array(probs, dtype=np.float32)
    C_hat = np.zeros((num_classes, num_classes))
    for label in range(num_classes):
        indices = np.arange(len(train_data_gold.train_labels))[np.isclose(
            np.array(train_data_gold.train_labels) - num_classes, label)]
        C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)

    import cvxpy

    base_rate_clean = [0] * num_classes
    base_rate_corr = [0] * num_classes
    for label in range(num_classes):
        base_rate_clean[label] = sum(
            np.array(train_data_gold.train_labels) == label)
        base_rate_corr[label] = sum(
            np.array(train_data_silver.train_labels) == label)
    base_rate_clean = np.array(base_rate_clean).reshape(
        (1, -1)) / len(train_data_gold.train_labels)
    base_rate_corr = np.array(base_rate_corr).reshape(
        (1, -1)) / len(train_data_silver.train_labels)

    print(base_rate_clean)
    print(base_rate_corr)

    C_hat_better = cvxpy.Variable(num_classes, num_classes)
    objective = cvxpy.Minimize(
        1e-2 * cvxpy.sum_squares(C_hat_better - C_hat) / num_classes +
        cvxpy.sum_squares(base_rate_clean * C_hat_better - base_rate_corr))

    constraints = [
        0 <= C_hat_better, C_hat_better <= 1,
        1 == cvxpy.sum_entries(C_hat_better, axis=1)
    ]

    prob = cvxpy.Problem(objective, constraints)
    prob.solve()

    C_hat = np.array(C_hat_better.value)

    return C_hat.T.astype(np.float32)
Example n. 52
 def max_request_forwarded(self):
     objective = cp.Maximize(
         cp.sum_entries(cp.sum_entries(np.sum(
             map(lambda i: cp.mul_elemwise(self.A[i].T, self.x_bar[i]),
                 range(self.S))),
                                       axis=0),
                        axis=1))
     capacityConstrain = cp.sum_entries(
         np.sum(
             map(lambda i: cp.mul_elemwise(self.A[i], self.x_bar[i].T),
                 range(self.S))),
         axis=1) <= self.C  #row_sums, if axis=0 it would be a column sum
     constraints = [capacityConstrain]
     for i in range(self.S):
         r1 = cp.sum_entries(self.x_bar[i], axis=1) <= self.avg_D[i]
         constraints.append(r1)
         r2 = self.x_bar[i] >= 0.0
         constraints.append(r2)
     lp1 = cp.Problem(objective, constraints)
     result = lp1.solve()
Example n. 53
    def train(self):
        '''Optimize the asymmetric dual problem and return optimal w and b.'''
        if not self.training_instances:
            raise ValueError('Must set training instances before training')

        if isinstance(self.training_instances, List):
            y, X = sparsify(self.training_instances)
            y, X = np.array(y), X.toarray()
        else:
            X, y = self.training_instances.numpy()

        i_neg = np.array([ins[1] for ins in zip(y, X) if ins[0] == self.negative_classification])
        i_pos = np.array([ins[1] for ins in zip(y, X) if ins[0] == self.positive_classification])
        # centroid can be computed in multiple ways
        n_centroid = np.mean(i_neg)
        Mk = ((1 - self.c_delta * np.fabs(n_centroid - i_pos) /
               (np.fabs(n_centroid) + np.fabs(i_pos))) *
              ((n_centroid - i_pos) ** 2))
        Zks = np.zeros_like(i_neg)
        Mk = np.concatenate((Mk, Zks))
        TMk = np.concatenate((n_centroid - i_pos, Zks))
        ones_col = np.ones((i_neg.shape[1], 1))
        pn = np.concatenate((i_pos, i_neg))
        pnl = np.concatenate((np.ones(i_pos.shape[0]), -np.ones(i_neg.shape[0])))
        col_neg, row_sum = i_neg.shape[1], i_pos.shape[0] + i_neg.shape[0]

        # define cvxpy variables
        w = Variable(col_neg)
        b = Variable()
        xi0 = Variable(row_sum)
        t = Variable(row_sum)
        u = Variable(row_sum, col_neg)
        v = Variable(row_sum, col_neg)

        constraints = [xi0 >= 0,
                       xi0 >= 1 - mul(pnl, (pn * w + b)) + t,
                       t >= mul(Mk, u) * ones_col,
                       mul(TMk, (-u + v)) == 0.5 * (1 + pnl) * w.T,
                       u >= 0,
                       v >= 0]

        # objective
        obj = cvx.Minimize(0.5 * (cvx.norm(w)) + self.c * cvx.sum_entries(xi0))
        prob = cvx.Problem(obj, constraints)

        if OPT_INSTALLED:
            prob.solve(solver='MOSEK')
        else:
            prob.solve()

        self.weight_vector = np.asarray(w.value.T)[0]
        print(
            "weight vec calculated in svm restrained learner: {}".format(self.weight_vector.shape))
        self.bias = b.value
Example n. 54
def create(m, n):
    np.random.seed(0)

    x0 = np.random.randn(n)
    A = np.random.randn(m,n)
    A = A*sp.diags([1 / np.sqrt(np.sum(A**2, 0))], [0])
    b = A.dot(x0) + np.sqrt(0.01)*np.random.randn(m)
    b = b + 10*np.asarray(sp.rand(m, 1, 0.05).todense()).ravel()

    x = cp.Variable(n)
    return cp.Problem(cp.Minimize(cp.sum_entries(cp.huber(A*x - b))))
Example n. 55
def StaticSolution(C, lambda_var, M_t, s_t, alpha_t, sigma_t):
    # CHANGE IT TO REFLECT TEXT, DON'T SHIP IT
    """Compute the static solution given the problem parameters."""
    T = len(s_t)
    if (np.all(s_t == s_t[0])):  # constant spread
        return np.diff(np.concatenate([M_t, [1.]])) * C
    import cvxpy as cvx
    u = cvx.Variable(T)
    U = cvx.Variable(T)
    objective = cvx.square(u).T * (s_t * alpha_t / 2.) - np.sign(C) * u.T * s_t / 2. + \
        lambda_var * (C**2) * (cvx.square(U).T * (sigma_t**2) -
                               2 * U.T * (sigma_t**2 * M_t))
    constraints = []
    for t in range(T):
        constraints += [U[t] == cvx.sum_entries(u[:t]) / C]
    constraints += [cvx.sum_entries(u) == C]
    constraints += [C * u >= 0]
    problem = cvx.Problem(cvx.Minimize(objective), constraints)
    problem.solve()
    return u.value.A1
Example n. 56
    def train(self, x, y, x_star=None, gamma=1.0, C=1.0):

        # ensure labels are [-1, 1]
        y[y == 0] = -1
        assert np.unique(y).tolist() == [-1, 1]

        # if x* not supplied just make it ones
        if x_star is None:
            x_star = np.ones([x.shape[0], 1])

        # regularization parameter
        g = cvx.Parameter(sign="positive")
        g.value = gamma
        c = cvx.Parameter(sign="positive")
        c.value = C

        # define model variables
        w = cvx.Variable(x.shape[1])
        b = cvx.Variable()
        w_star = cvx.Variable(x_star.shape[1])
        d = cvx.Variable()

        # define objective
        obj = cvx.Minimize(
            0.5 * g * cvx.sum_squares(w) / x.shape[1] +
            0.5 * g * cvx.sum_squares(w_star) / x_star.shape[1] +
            c * cvx.sum_entries(x_star * w_star - d) / x.shape[0])

        # define constraints
        constraints = [
            cvx.mul_elemwise(y, (x * w - b)) >= 1 - (x_star * w_star - d),
            (x_star * w_star - d) >= 0
        ]

        # form problem and solve
        prob = cvx.Problem(obj, constraints)
        prob.solve()

        try:

            # throw error if not optimal
            assert prob.status == 'optimal'

            # save model parameters
            self.w = np.array(w.value)
            self.b = np.array(b.value)

            # return success
            return True

        except:

            # return failure
            return False
Example n. 57
    def solveStep(self, agg_point, solver='ECOS'):
        """
        takes the aggregated set point and
        outputs power operating point for each resource and objective value
        """
        # number of resources
        N = self.N

        # define cvx variables
        p = cvx.Variable(N)
        eps = cvx.Variable(1)

        # define aggregate tracking objective and constraint
        obj = [self.mu * eps]
        constraints = [
            cvx.sum_entries(p) <= agg_point + eps,
            agg_point - eps <= cvx.sum_entries(p), eps >= 0
        ]

        # gather all resources objective function and constraints
        for i in range(N):
            obj_part = self.resource_list[i].costFunc(p[i])
            obj.append(obj_part)

            constraints_part = self.resource_list[i].convexHull(p[i])
            constraints.extend(constraints_part)

        # form and solve problem
        obj_final = cvx.Minimize(sum(obj))
        prob = cvx.Problem(obj_final, constraints)
        prob.solve(solver=solver)

        if prob.status != 'optimal':
            print('Problem status is: ', prob.status)
            p_out = p.value
        else:
            p_out = p.value

        self.p_requested = p_out.A1
        self.eps = eps.value
        self.prob_val = prob.value
Example n. 58
def efficeint_frontier_solver(data, sample=500):
    # data = data_delta
    # compute the vertex (minimum-variance point) of the efficient frontier
    n = len(data.columns)
    w = cvx.Variable(n)
    return_vec = data.values.T
    mu = np.asmatrix(data.mean()).T
    ret = mu.T * w
    C = np.asmatrix(np.cov(return_vec))
    risk = cvx.quad_form(w, C)
    prob0 = cvx.Problem(cvx.Minimize(risk), 
                   [cvx.sum_entries(w) == 1, 
                    w >= 0,
                    ])
    prob0.solve()
    # print(w0.value)
    mu_min = ret.value

    risk_data = []
    ret_data = []
    weight_data = []
    # move rightwards along the efficient frontier from the vertex
    delta = cvx.Parameter(sign='positive')
    prob = cvx.Problem(cvx.Minimize(risk), 
           [cvx.sum_entries(w) == 1, 
            w >= 0,
            ret == mu_min + delta])

    for i in np.linspace(0, 5, sample):
        delta.value = i
        prob.solve()
        risk_min = cvx.sqrt(risk).value
        if risk_min == float('inf') or risk_min is None:
            break
        risk_data.append(cvx.sqrt(risk).value)
        # print(cvx.sqrt(risk).value)
        ret_data.append(ret.value)
        weight_data.append(w.value)
        # print(ret.value)
        # print(w.value)
    return risk_data, ret_data, weight_data
Example n. 59
def simultaneous_planning_cvx(S,
                              A,
                              D,
                              t_max=2000,
                              delta=5,
                              file_suffix='',
                              save_dir=''):

    # Setup problem parameters
    # make p_tau uniform between 500 and 2000
    p_tau = np.ones(t_max)
    p_tau[:5] = 0
    p_tau = p_tau / np.sum(p_tau)

    n_dict_elem = D.shape[1]

    # compute variance of dictionary elements.
    stas_norm = np.expand_dims(np.sum(A**2, 0), 0)  # 1 x # cells
    var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D)))  # # dict

    # Construct the problem.

    y = cvxpy.Variable(n_dict_elem, t_max)

    x = cvxpy.cumsum(y, 1)
    S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
    objective = cvxpy.Minimize((cvxpy.sum_entries(
        (S_expanded - A * (D * x))**2, 0) + var_dict * x) * p_tau)
    constraints = [
        0 <= y,
        cvxpy.sum_entries(y, 0).T <= delta * np.ones((1, t_max)).T
    ]
    prob = cvxpy.Problem(objective, constraints)

    # The optimal objective is returned by prob.solve().
    result = prob.solve(verbose=True)
    # The optimal value for x is stored in x.value.
    print(x.value)
    # The optimal Lagrange multiplier for a constraint
    # is stored in constraint.dual_value.
    print(constraints[0].dual_value)