Example 1
    def __init__(self, obstacle, resolution=200):
        self.w_t, self.w_j = (cvx.Parameter(nonneg=True) for _ in range(2))
        self.w_t.value = W_T
        self.w_j.value = W_J

        self.resolution = resolution

        self.alpha = cvx.Variable(resolution + 1)
        self.beta = cvx.Variable(resolution + 1)

        delta_s = (S_F - S_0) / resolution
        self.delta_s = delta_s
        self.J_t = 2 * sum(
            delta_s * inv_pos(sqrt(self.beta)[:-1] + sqrt(self.beta)[1:]))
        self.J_s = sum(
            ((self.alpha[1:] - self.alpha[:-1]) / delta_s)**2 * delta_s)

        self.dynamic_model = [
            (self.beta[1:] - self.beta[:-1]) / delta_s == 2 * self.alpha[:-1]  # dbeta/ds == 2*alpha on each segment
        ]

        t_corner = obstacle.bottom_left_corner[1] + obstacle.height
        s_corner = obstacle.bottom_left_corner[0]
        index_s = int(math.ceil((s_corner - S_0) / delta_s))
        self.obstacle_constraint = (2 * sum(delta_s * inv_pos(
            sqrt(self.beta)[:(index_s - 1)] + sqrt(self.beta)[1:index_s])) >=
                                    t_corner)

        self.friction_circle_constraints = [
            self.beta <= (V_MAX**2), self.alpha <= A_MAX
        ]
        self.initial_condition = [
            self.beta[0] == (V_0**2), self.alpha[0] == A_0
        ]
Example 2
def c():
    x = cp.Variable()
    y = cp.Variable()
    obj = cp.Minimize(0)
    constraints = [x >= 0, y >= 0, 1/x + 1/y <=1]
    problem = cp.Problem(obj, constraints)
    print(f"c before: {problem.is_dcp()}")

    x = cp.Variable(nonneg=True)
    y = cp.Variable(nonneg=True)
    obj = cp.Minimize(0)
    constraints = [cp.inv_pos(x) + cp.inv_pos(y) <=1, x >= 0, y >= 0]
    problem = cp.Problem(obj, constraints)
    print(f"c after: {problem.is_dcp()}")
    problem.solve()
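
A minimal standalone check (not part of the original example; it assumes only that cvxpy is installed) of the point this example makes: division by a variable is not recognized by the DCP ruleset, while cp.inv_pos is a convex atom:

import cvxpy as cp

x = cp.Variable()
y = cp.Variable()

print((1 / x + 1 / y <= 1).is_dcp())                  # False: 1/x has unknown curvature
print((cp.inv_pos(x) + cp.inv_pos(y) <= 1).is_dcp())  # True: inv_pos is a convex atom

# Feasibility problem written with the DCP-compliant atom
prob = cp.Problem(cp.Minimize(0), [cp.inv_pos(x) + cp.inv_pos(y) <= 1])
prob.solve()
print(prob.status, x.value, y.value)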
Example 3
    def updateEi2(self,sigmai,ei_):
        sigmai=np.array(sigmai)
        ei_=np.array(ei_)
        ei=cvx.Variable(I)
        si=cvx.Variable(I)
        constraints=[0<=si]
        slacost=cvx.sum_entries(cvx.mul_elemwise(self.lamb,cvx.inv_pos(self.mu-cvx.mul_elemwise(self.lamb,cvx.inv_pos(self.S-si)))))
        watcost=np.array(self.omegawat)*si
        for i in range(I):
            constraints+=[ei[i]==self.gamma[i]*si[i]]
        objective = cvx.Minimize(-np.transpose(sigmai)*ei+self.omegadc*(slacost+watcost)+rho/2*cvx.sum_squares(ei-np.array(ei_)))
        prob = cvx.Problem(objective, constraints)
        result = prob.solve(solver=slv)

        #print ei.value.A1
        #print result
        self.e=ei.value.A1
        mins=self.minServer()
        maxe=np.zeros(I)
        for i in range(I):
            maxe[i]=(self.S[i]-mins[i])*self.gamma[i]
            if self.e[i]<0:
                self.e[i]=0
            else:
                if self.e[i]>maxe[i]:
                    self.e[i]=maxe[i]
                    print('\nWarning: reducing si lower than minimum active server!!!!')
        #print("ei:",self.e)

        self.setSwitchSever(self.e)
Example 4
def DFFweights(A, B):
    """
    This function minimizes total variance(B/mean(A*x)) where A.shape = (m x n), X.shape = (n x k), and B.shape = (m x k)
    where m = image flattened in row-major order, n = number of EigenFlats, and k = observations.
    A is the array of EigenFlats, X is the coefficients for each EigenFlat, B is the observation to correct.
    """
    k = B.shape[1]
    m = B.shape[0]
    n = A.shape[1]

    # Preallocate the empty arrays
    coeff = np.zeros((n, k), dtype='f8')

    b = cp.Parameter(m, nonneg=True)
    x = cp.Variable(n, nonneg=True)

    totalVariance = cp.tv((b * cp.inv_pos(cp.sum(A * x))))
    objective = cp.Minimize(totalVariance)
    prob = cp.Problem(objective)

    # Loop through all spectra and find optimal solutions with the warm start
    for ind in range(0, k):
        b.value = B[:, ind]
        loss = prob.solve()
        coeff[:, ind] = x.value

    return coeff
Example 5
def SFCLS_unmix(A, b, lam=1e-6):
    m = A.shape[0]
    n = A.shape[1]

    x = cp.Variable(n)
    c = cp.Parameter(n)
    objective = cp.Minimize(
        cp.sum_squares(A * x - b) + lam * cp.inv_pos(c * x))
    constraints = [x >= 0, cp.sum(x) == 1]
    prob = cp.Problem(objective, constraints)
    # iterate over n convex programs
    temp_loss = np.zeros(n)
    for i in range(n):
        c_new = np.zeros(n)
        c_new[i] = 1
        c.value = c_new
        temp_loss[i] = prob.solve()

    # choose index with minimum loss
    i_min = temp_loss.argmin()
    c_new = np.zeros(n)
    c_new[i_min] = 1
    c.value = c_new
    loss = prob.solve()
    return x.value, loss
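
A self-contained sketch (made-up data, using @ for matrix multiplication instead of the older * syntax) of the per-index subproblem that SFCLS_unmix solves; the inv_pos(x[i]) term penalizes small values of the chosen coefficient, nudging weight onto that candidate index. Names such as A_demo and b_demo are illustrative only:

import numpy as np
import cvxpy as cp

rng = np.random.default_rng(0)
A_demo = np.abs(rng.normal(size=(8, 3)))       # endmember matrix (made up)
b_demo = A_demo @ np.array([0.7, 0.3, 0.0])    # synthetic mixed pixel

x = cp.Variable(3)
i = 0                                          # candidate index
objective = cp.Minimize(cp.sum_squares(A_demo @ x - b_demo)
                        + 1e-6 * cp.inv_pos(x[i]))
prob = cp.Problem(objective, [x >= 0, cp.sum(x) == 1])
prob.solve()
print(np.round(x.value, 3))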
Example 6
    def constr(self, x, phi, log_cash):
        def to_constant(x):
            """return violation if constant, constraint if variable"""
            return -x.value if x.is_constant() else x >= 0

        expr1 = cvx.log(sum(a - a*b*cvx.inv_pos(x[g] + b)
                            for g, a, b in izip(self.goods, self.a, self.b)))
        return [to_constant(expr1 - np.log(a) + cvx.log(x[g] + b) + phi[g] - log_cash)
                for g, a, b in izip(self.goods, self.a, self.b)]
Example 7
 def convertToHyperrectangle(self, t):
     """
     Find a minimum-volume hyperrectangle that outer bounds the ellipsoid
     {x : x^T*P*x <= t}
     
     Parameters
     ----------
     t : float
         Right-hand side of the ellipsoid-defining inequality; it scales
         the ellipsoid.
     
     Returns
     -------
     H : Hyperrectangle
         Smallest hyperrectangle that contains the ellipsoid.
     """
     S = self.S
     m = S.shape[0]
     # Find upper bound
     u = cvx.Variable(m)
     y = cvx.Variable(m)
     cost = cvx.Minimize(sum(u))
     constraints  = [cvx.inv_pos(4*y[i])*S[i,i]+y[i]*t<=u[i] for i in range(m)]
     constraints += [y >= 0]
     problem = cvx.Problem(cost, constraints)
     problem.solve(**global_vars.SOLVER_OPTIONS)
     u = np.array(u.value.T).flatten()
     # Find lower bound
     l = cvx.Variable(m)
     cost = cvx.Maximize(sum(l))
     constraints  = [-cvx.inv_pos(4*y[i])*S[i,i]-y[i]*t>=l[i] for i in range(m)]
     constraints += [y >= 0]
     problem = cvx.Problem(cost, constraints)
     problem.solve(**global_vars.SOLVER_OPTIONS)
     l = np.array(l.value.T).flatten()
     # Make the hyperrectangle
     H = Hyperrectangle(l,u)
     return H
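
For intuition (a reading of the constraints above, not text from the original source): for each axis, minimizing S[i,i]/(4*y[i]) + y[i]*t over y[i] > 0 gives sqrt(S[i,i]*t) by AM-GM, so the solved u[i] (and, symmetrically, l[i]) is exactly that per-axis bound. A quick numeric check with made-up scalars:

import numpy as np

S_ii, t = 2.0, 3.0
y = np.linspace(1e-3, 5.0, 200001)
print(np.min(S_ii / (4 * y) + y * t))   # approximately sqrt(S_ii * t)
print(np.sqrt(S_ii * t))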
Example 8
def optimize(path, params):
    """
    main function to optimize trajectory
    solves convex optimization
    """

    theta = path['theta']
    dtheta = path['dtheta']
    S = path['S']
    S_prime = path['S_prime']
    S_dprime = path['S_dprime']
    num_wpts = theta.size

    # opt vars
    A = cv.Variable((num_wpts - 1))
    B = cv.Variable((num_wpts))
    U = cv.Variable((2, num_wpts - 1))

    cost = 0
    constr = []

    # no constr on A[0], U[:,0], defined on mid points

    # TODO: constr could be vectorized?

    constr += [B[0] == 0]
    for j in range(num_wpts - 1):

        cost += 2 * dtheta * cv.inv_pos(
            cv.power(B[j], 0.5) + cv.power(B[j + 1], 0.5))

        R, M, C, d = dynamics_cvx(S_prime[:, j], S_dprime[:, j], params)
        constr += [R * U[:, j] == M * A[j] + C * ((B[j] + B[j + 1]) / 2) + d]
        constr += [B[j] >= 0]
        constr += [cv.norm(U[:, j], 2) <= params['Fmax']]
        constr += [U[0, j] <= params['Flongmax']]
        constr += [B[j + 1] - B[j] == 2 * A[j] * dtheta]

    # problem_define_time = time.time()
    problem = cv.Problem(cv.Minimize(cost), constr)
    # problem_define_done = time.time()
    solution = problem.solve(solver=cv.MOSEK, verbose=False)
    # problem_solve_done = time.time()
    B, A, U = B.value, A.value, U.value
    B = abs(B)

    vopt, topt = simulate(B, A, U, path, params)
    # cvx_simulate_done_time = time.time()
    # print('Problem defn time: ' + str(problem_define_done - problem_define_time) + ', problem solve time: ' + str(problem_solve_done - problem_define_done) + ', cvx sim time: ' + str(cvx_simulate_done_time - problem_solve_done))
    return B, A, U, vopt, topt
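
For intuition (not part of the original code): with B = v^2, the per-segment cost 2*dtheta / (sqrt(B[j]) + sqrt(B[j + 1])) is a discretization of distance over speed, so the summed objective approximates total travel time. A scalar sanity check:

v, dtheta = 3.0, 0.1
B0 = B1 = v ** 2
print(2 * dtheta / (B0 ** 0.5 + B1 ** 0.5))   # equals dtheta / v when speed is constant
print(dtheta / v)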
Example 9
    def _get_f_error(self, idx):
        # get relevant coefficients for desired index set
        coeffs = self._coeffs[idx].cpu().numpy()
        coeffs[coeffs < 0.0] = 0.0

        # define variables and parameters
        num_var = coeffs.shape[0]
        x_arg = cp.Variable(num_var)
        alpha = cp.Parameter(num_var, nonneg=True)
        alpha.value = coeffs

        # construct symbolic error vector for theoretical error per filter
        k_constant = 3
        expr = cp.vstack([
            cp.multiply(cp.inv_pos(x_arg), alpha / k_constant),
            cp.multiply(cp.inv_pos(cp.sqrt(x_arg)),
                        cp.sqrt(6 * alpha / k_constant)),
        ])
        f_error = cp.norm(expr, axis=0) + cp.multiply(cp.inv_pos(x_arg),
                                                      alpha / k_constant)
        f_error = 1 / 2 * f_error

        # return argument and symbolic error function to argument
        return x_arg, f_error
Example 10
    def gstar(self, x, dh):

        # This corresponds to Step 3 of Algorithm 1 (DSLEA): the primal problem with the
        # linearized concave part. See detailed comments for the computation in function dh.
        #
        # Instead of numpy, we use expressions from cvxpy, which are equivalent

        var_in_inverse = cp.diag(cp.inv_pos(self.var_in))
        # vec_exp = cp.exp(cp.matmul(cp.matmul(cp.transpose(self.kWeightsTop), var_in_inverse), (x-self.mean_in))
        #                  + cp.transpose(self.kBiasTop))
        # return cp.log(cp.sum(vec_exp)) - self.mean_out / self.var_out - cp.transpose(x)@dh
        return cp.log_sum_exp(
            cp.matmul(
                cp.matmul(cp.transpose(self.kWeightsTop), var_in_inverse),
                (x - self.mean_in)) + self.kBiasTop
        ) - self.mean_out / self.var_out - cp.transpose(x) @ dh
Example 11
def optimize(path, params, plot_results, print_updates):
    """ main function to solve convex optimization
    """
    theta = path['theta']
    dtheta = path['dtheta']
    S = path['S']
    S_prime = path['S_prime']
    S_dprime = path['S_dprime']
    num_wpts = theta.size

    # opt vars
    A = cv.Variable((num_wpts - 1))
    B = cv.Variable((num_wpts))
    U = cv.Variable((2, num_wpts - 1))

    cost = 0
    constr = []

    # no constr on A[0], U[:,0], defined on mid points
    constr += [B[0] == 0]

    for j in range(num_wpts - 1):

        cost += 2 * dtheta * cv.inv_pos(
            cv.power(B[j], 0.5) + cv.power(B[j + 1], 0.5))

        R, M, C, d = dynamics_cvx(S_prime[:, j], S_dprime[:, j], params)
        constr += [R * U[:, j] == M * A[j] + C * ((B[j] + B[j + 1]) / 2) + d]
        constr += [B[j] >= 0]
        constr += [cv.norm(U[:, j], 2) <= params['Fmax']]
        constr += [U[0, j] <= params['Flongmax']]
        constr += [B[j + 1] - B[j] == 2 * A[j] * dtheta]

    problem = cv.Problem(cv.Minimize(cost), constr)
    solution = problem.solve(solver=cv.ECOS, verbose=False)

    B, A, U = B.value, A.value, U.value
    B = abs(B)
    vopt, topt = simulate(B,
                          A,
                          U,
                          path,
                          params,
                          plot_results=plot_results,
                          print_updates=print_updates)
    return B, A, U, vopt, topt
Example 12
 def txprocdurtrans_time_model(self, s_id, datasize, comp_list, num_itres):
   #datasize: MB, bw: Mbps, proc: Mbps
   tx_t = (8*datasize)*cp.inv_pos(BWREGCONST*self.s_bw[s_id, 0]) # sec
   numitfuncs = len(comp_list)
   quadoverlin_vector = expr((numitfuncs, 1))
   for i, comp in enumerate(comp_list):
     quadoverlin = comp*cp.quad_over_lin(self.s_n[s_id, i], self.s_proc.get((s_id, 0)) )
     quadoverlin_vector.set_((i, 0), quadoverlin)
   #
   quadoverlin_ = (quadoverlin_vector.agg_to_row()).get((0,0))
   
   # proc_t = num_itres* (8*datasize)*(quadoverlin_) # sec
   proc_t = (8*datasize)*(quadoverlin_) # sec
   stage_t = 0 #self.s_dur.get((s_id, 0))
   #trans_t = cp.max(tx_t, proc_t)
   trans_t = tx_t + proc_t #+ stage_t
   
   return [tx_t, proc_t, stage_t, trans_t]
Example 13
def adaptive_stim_4(model,
                    metrics,
                    ntrials_per_phase,
                    n_amps=38,
                    n_trials_max_global=25,
                    use_decoder=False):

    probs_smoothen_recons = model.probs_smoothen_recons
    ntrials_cumsum = metrics.ntrials_cumsum
    probs_variance = model.probs_variance

    # Solve a convex problem!
    n_elecs = probs_smoothen_recons.shape[0]
    var_elec_amp_cell = probs_variance  # probs_smoothen_recons * (1 - probs_smoothen_recons)
    if use_decoder:
        # use decoder
        print('Using decoder')
        decoder = model.decoder
        dec_energy = np.sum(decoder**2, 0)
        var_elec_amp_cell = var_elec_amp_cell * dec_energy

    var_elecs_amp = np.nansum(var_elec_amp_cell, 2)
    var_elecs_amp = np.ndarray.flatten(var_elecs_amp)
    print('scaled up the variance')
    var_elecs_amp *= np.ndarray.flatten(ntrials_cumsum)
    print(
        'Added extra statement :  var_elecs_amp = np.minimum(var_elecs_amp, 0.00000000001)'
    )
    var_elecs_amp = np.minimum(var_elecs_amp, 0.000000001)

    T = cp.Variable(n_elecs * n_amps)
    objective = cp.Minimize(
        cp.sum(var_elecs_amp *
               cp.inv_pos(T + np.ndarray.flatten(ntrials_cumsum))))
    constraints = [cp.sum(T) <= ntrials_per_phase * n_elecs * n_amps, 0 <= T]
    prob = cp.Problem(objective, constraints)
    result = prob.solve()
    trial_elec_amps = (T.value).astype(int)

    trial_elec_amps = np.reshape(trial_elec_amps, [n_elecs, n_amps])
    trial_elec_amps[np.isnan(trial_elec_amps)] = 0

    return trial_elec_amps
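
A self-contained sketch (made-up variances and budget) of the trial-allocation step above: new trials T are spread so that the summed variance proxy var/(T + n_prev) is minimized under a total-trial budget:

import numpy as np
import cvxpy as cp

var = np.array([1.0, 0.5, 0.25])       # per-condition variance proxies (made up)
n_prev = np.array([2.0, 2.0, 2.0])     # trials already collected
budget = 10

T = cp.Variable(3)
objective = cp.Minimize(cp.sum(cp.multiply(var, cp.inv_pos(T + n_prev))))
prob = cp.Problem(objective, [cp.sum(T) <= budget, T >= 0])
prob.solve()
print(np.round(T.value, 2))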
Example 14
def sumcvxp(graph, testset, priorities, debug=None):
    """
    Generates and solves the Cohen et al. SUM objective convex program
    for the provided list of tests and dictionary of edge priorities.

    :param graph: topology, a NetworkX graph object
    :param testset: a list of probing tests, each a list of probing paths
    :param priorities: a dictionary mapping edges to their priorities.
                       We assume that priorities are scaled such that sum(priorities.values()) == 1.

    :return: list q of length m=len(testset), where, for each timestep, q[i] is the probability that test[i] should be probed
    """
    m = len(testset)

    # produce a dictionary tests_for_edge that maps edges to a list of indices corresponding to tests in the testset list that contain that edge
    # as a dictionary, keys are unordered and iteration through it needs to be made predictable (later in code)
    # the test indices are iterated in ascending order, and so each edge's list is ordered
    edges_in_tests = edgeset_per_test(testset, graph.is_directed())
    tests_for_edge = dict()
    for i in range(m):
        for edge in edges_in_tests[i]:
            tests_for_edge.setdefault(edge, list()).append(i)

    Q = cp.Variable(m)
    constraints = [Q >= 0, cp.sum(Q) == 1]
    # below, the inline iteration through keys of tests_for_edge is done through a sorted list of those keys so that the constraint is formulated in a deterministic way
    objective = cp.Minimize(
        cp.sum([
            priorities[e] *
            cp.inv_pos(cp.sum([Q[i] for i in tests_for_edge[e]]))
            for e in sorted(tests_for_edge)
        ]))

    prob = cp.Problem(objective, constraints)
    prob.solve()
    if debug is not None:
        debug['constraints'] = constraints
        debug['objective'] = objective
        debug['Q'] = Q
    return list(Q.value)
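
A minimal standalone version of the same program with made-up data (two tests, three edges), useful for seeing the objective without the graph plumbing; the edge names and priorities below are illustrative only:

import cvxpy as cp

priorities = {'e1': 0.5, 'e2': 0.3, 'e3': 0.2}
tests_for_edge = {'e1': [0], 'e2': [0, 1], 'e3': [1]}

Q = cp.Variable(2)
objective = cp.Minimize(sum(
    priorities[e] * cp.inv_pos(sum(Q[i] for i in tests_for_edge[e]))
    for e in sorted(tests_for_edge)))
prob = cp.Problem(objective, [Q >= 0, cp.sum(Q) == 1])
prob.solve()
print(Q.value)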
Example 15
def e():
    x = cp.Variable()
    y = cp.Variable()
    obj = cp.Minimize(0)
    constraints = [
            x * y >= 1,
            x >= 0,
            y >= 0
            ]
    problem = cp.Problem(obj, constraints)
    print(f"e before: {problem.is_dcp()}")

    x = cp.Variable()
    y = cp.Variable()
    obj = cp.Minimize(0)
    constraints = [
            # x*y >= 1 with x, y >= 0 is equivalent to 1/y <= x
            cp.inv_pos(y) <= x,
            x >= 0,
            ]
    problem = cp.Problem(obj, constraints)
    print(f"e after: {problem.is_dcp()}")
    problem.solve()
Example 16
def adaptive_stim(model,
                  metrics,
                  ntrials_per_phase,
                  n_amps=38,
                  n_trials_max_global=25):

    probs_est = model.probs_est
    probs_smoothen_recons = model.probs_smoothen_recons
    ntrials_cumsum = metrics.ntrials_cumsum

    # Solve a convex problem!
    var_elec_cell = np.nansum(probs_est * (1 - probs_est), 1)
    var_elecs = np.nansum(var_elec_cell, 1)
    T = cp.Variable(512)
    objective = cp.Minimize(
        cp.sum(var_elecs * cp.inv_pos(T + np.sum(ntrials_cumsum, 1) / n_amps)))
    constraints = [cp.sum(T) <= ntrials_per_phase * 512, 0 <= T]
    prob = cp.Problem(objective, constraints)
    result = prob.solve(verbose=False)
    n_trials_per_elec = (T.value)
    print('# Trials per electrode computed')

    # Convert # stimulations per electrode to elec_amp
    diff = np.abs(probs_est - probs_smoothen_recons)**2
    diff = np.sum(diff, 2)  # collapse across cells
    diff = diff / np.expand_dims(np.sum(diff, 1), 1)  # normalize
    trial_elec_amps = np.zeros((512, n_amps))
    for ielec in range(512):
        if n_trials_per_elec[ielec] > 0:
            trial_elec_amps[ielec, :] = np.round(
                n_amps * diff[ielec, :n_amps] * n_trials_per_elec[ielec])

    trial_elec_amps = np.maximum(trial_elec_amps, 0)
    # replace nans with 0
    trial_elec_amps[np.isnan(trial_elec_amps)] = 0

    return trial_elec_amps
Example 17
    2.50760010e+03, 2.07638980e+03, 5.81797641e+03, 1.08570667e+04,
    5.12726392e+03, 1.00000000e+00, 6.58929541e+03, 7.09769073e+03,
    3.34303516e+03, 2.66082789e+03, 2.77756166e+03, 1.77592755e+03,
    3.07584351e+03, 2.35098041e+03, 7.04177079e+03, 3.03844715e+03,
    3.51614270e+03, 2.06672946e+03, 1.39886981e+03, 1.17892981e+03,
    1.16711613e+03, 2.99732387e+03, 1.50369037e+03, 2.52066711e+03,
    1.05911714e+04, 3.21540906e+03, 2.98497302e+03, 7.70131934e+03,
    4.41149255e+03, 9.79452597e+03, 7.03271168e+03, 6.58929541e+03,
    1.00000000e+00, 7.13570746e+03, 9.24697217e+03, 1.40667535e+03,
    1.18594623e+03, 1.38497012e+03, 3.06499432e+03, 1.16380303e+04,
    5.08549064e+03, 6.19199273e+03, 6.66881592e+03, 9.94092682e+02,
    9.64287628e+02, 6.75036224e+02, 9.87007515e+02, 8.14448081e+02,
    1.40279779e+03, 1.00999878e+03, 6.06632482e+03, 3.18211285e+03,
    1.77704749e+03, 4.73147520e+03, 1.05468057e+04, 2.66577881e+03,
    1.25102759e+03, 7.09769073e+03, 7.13570746e+03, 1.00000000e+00
])
Iij = Iij / 1000
print(min(Iij), max(Iij))
# Rij = np.array([1,2,3,4,5])
# Iij = np.array([0.34, 0.1, 0.955, 0.3, 0.1])
dim = 676

c = cvx.Variable(dim)
constr = []
for i in range(dim):
    constr.append(c[i] >= Rij[i])
prob = cvx.Problem(cvx.Maximize(cvx.norm(cvx.multiply(Iij, cvx.inv_pos(c)))),
                   constr)

prob.solve(method="dccp", solver="MOSEK", verbose=True)
print(c.value)
# sliceArray = PTl[np.asmatrix([1,3,5]).T, [1,3,5]]
# pl.figure(figsize=(6, 6))
# fig = pl.figure()
# ax = fig.gca(projection='3d')
# ax.plot(xs=p.value[0,:].A1,ys=p.value[1,:].A1,zs=p.value[2,:].A1)
# legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), prop={'size': 18})
# print(dir(ct))
# raise Exception('exit')

Example 18
import veh_speed_sched_data as ct

lowDiag = np.tril(np.ones((ct.n, ct.n)))
u = cv.Variable(ct.n)
obj = cv.Minimize(
    cv.sum_entries(
        cv.mul_elemwise(ct.d,
                        ct.a * cv.inv_pos(u) + ct.b + ct.c * u)))
cst = [
    ct.tau_min <= lowDiag * cv.mul_elemwise(ct.d, u),
    lowDiag * cv.mul_elemwise(ct.d, u) <= ct.tau_max, 1 / ct.smin >= u,
    u >= 1 / ct.smax
]
prb = cv.Problem(obj, cst)
prb.solve()
print(prb.status)
print('fuel', prb.value)

dAccu = lowDiag * ct.d

pl.step(np.vstack((0, dAccu)), np.vstack((0, 1 / u.value)))
Example 19
de = 1 / 10
dehigh = 5 * de
m = np.real(np.max(
    np.linalg.eigvals(G)))  # Compute the maximum eigenvalue of the graph

bar = de / m
ba = 2 * bar
bs = 30 * ba / 100

alpha1 = 1 / (1 / (1 - dehigh) - 1 / (1 - de))
alpha2 = 1 / (1 / bs - 1 / ba)

# Geometric program
print(alpha1)
print(alpha2)

B = cp.Variable(shape=(N))
D = cp.Variable(N)
v = cp.Variable(N)
lamb = cp.Variable()
p = cp.Variable()

obj = cp.Minimize(alpha1 * cp.sum(cp.inv_pos(D)) +
                  alpha2 * cp.sum(cp.inv_pos(B)))
constraints = []
constraints.append((cp.diag(B) * G + cp.diag(D)) * v <= v)
constraints.append(v >= np.zeros(N))
prob = cp.Problem(obj, constraints)
print(prob.solve(gp=True))
Example 20
E = np.load('./cvxpy/E.npy')
T = np.load('./cvxpy/T.npy')
A = np.load('./cvxpy/A.npy')

m, n = X_c.shape

I_n = np.ones(n)[:, np.newaxis]
I_m = np.ones(m)[:, np.newaxis]

C = np.array([25,50,25,25])[:,np.newaxis]

Y_c = np.dot(X_c, R_c)
# Matrix variable with shape X_c.shape.
X = cp.Variable(X_c.shape, boolean=True)

F1 = cp.multiply(X.T @ P, cp.inv_pos(U))
F2 = cp.multiply(X.T @ M, cp.inv_pos(E))
F3 = cp.multiply(X.T @ S, cp.inv_pos(T))
F = 1 / n * (cp.sum(F1) + cp.sum(F2) + cp.sum(F3))

V = cp.diag(Y_c @ A @ R_c.T @ X.T) @ Q

# objective = cp.Minimize((C.T @ cp.max(R_c.T @ X.T @ B, axis=1)) - 3000 * F)  can successfully run
# add expression V into objective(as below) will cause "Segmentation fault (core dumped)"
objective = cp.Minimize((C.T @ cp.max(R_c.T @ X.T @ B, axis=1)) + 20 * V - 3000 * F)
constraints = [X.T @ P <= U, X.T @ M <= E, X.T @ S <= T, X @ I_n == I_m]

start = datetime.now()
prob = cp.Problem(objective, constraints)
prob.solve(solver=cp.GUROBI)
print("status:", prob.status)
Example 21
import numpy as np
import cvxpy as cp

n = 3
u = 2
gamma = 2
G = (np.ones((n, n)) - np.identity(n)) / 2
d = cp.Variable(n)
k = cp.Variable(n)
A = cp.Variable((n, n))
constraints = [
        d >= 0,
        k >= 0,
        A == cp.diag(d) + cp.diag(k) @ G,
        gamma * cp.sum(cp.inv_pos(d)) + cp.sum(cp.inv_pos(k)) <= u,
        ]
obj = cp.Minimize(cp.norm(A))
problem = cp.Problem(obj, constraints)
problem.solve()
print('status: ', problem.status)
print('optimal value: ', problem.value)
print("D: ")
print(np.diag(d.value))
print("K: ")
print(np.diag(k.value))
Example 22
    'cars': 20,  # number of cars
    'timeRange': [18, 32],  # time range to run simulation
    'actRate': 1. / 4,  # step time in hours
    'chargeRate': 1. / 12,  # level 1 full charge rate
    'lambda': 1e-2,  # dual function weight
    'priceRise':
    0.,  # amount (as a decimal percent) which the price rises when all cars are being charged
    'beta': 1.0,  # exponentiate price by beta [CURRENTLY UNUSED]
    'mu_stCharge': 0.4,  # mean on starting charge level
    'std_stCharge': 0.1,  # standard deviation on starting charge level
    'mu_arr': 19,  # mean on arrival time
    'std_arr': 2,  # standard deviation on arrival time
    'mu_dep': 31,  # mean on departure time
    'std_dep': 1,  # standard deviation on departure time
    'terminalFunc': (
        lambda x: -cp.inv_pos(x)
    ),  # terminal reward function in terms of CVXPY atoms ### MUST BE SOCP-compatible for GUROBI
    'terminalFuncCalc': (lambda x: -1. / x),
}


def meanField(params=None, gmm=None, plotAgainstBase=False):

    if params is None:
        params = DEFAULT_PARAMS

    if gmm is None:
        gmm = makeModel(timeRange=params['timeRange'])

    eGMM = energyGMM(gmm, time_range=params['timeRange'])
Example 23
tau_1 = cvx.Variable(N)
#tau_2 = cvx.Variable(N)
tau_2 = np.zeros(N)
print tau_2, b
print b.shape, tau_2.shape
#inv_alpha_1 = cvx.Variable(1)
inv_alpha_1 = 0.5

# specify objective function
#obj = cvx.Minimize(-cvx.sum_entries(cvx.log(tau_1)) - cvx.log_det(A - cvx.diag(tau_1)))
#obj = cvx.Minimize(-cvx.sum_entries(cvx.log(tau_1)) - cvx.log_det(A - cvx.diag(tau_1)))
# original
#obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(N*cvx.log(1/inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.sum_entries(cvx.square(tau_2)/tau_1) + inv_alpha_1*cvx.sum_entries(log_normcdf(tau_2*cvx.sqrt(1/(inv_alpha_1*tau_1)))) +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )
# modifications
#obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(-N*cvx.log(inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.matrix_frac(tau_2, cvx.diag(tau_1)) + inv_alpha_1*cvx.sum_entries(log_normcdf(tau_2.T*cvx.inv_pos(cvx.sqrt(inv_alpha_1*tau_1)))) +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )
obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(-N*cvx.log(inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.matrix_frac(tau_2, cvx.diag(tau_1)) + cvx.sum_entries(inv_alpha_1*log_normcdf(cvx.inv_pos(cvx.sqrt(inv_alpha_1*tau_1)))) )# +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )


#def upper_bound_logpartition(tau, inv_alpha_1):
#    tau_1, tau_2 = tau[:D+N], tau[D+N:]
#    tau_1_N, tau_2_N = tau_1[D:], tau_2[D:]     # first D values correspond to w
#    alpha_1 = 1.0 / inv_alpha_1
#    inv_alpha_2 = 1 - inv_alpha_1
#    if np.any(tau_1 <= 0):
#        integral_1 = INF2
#    else:
#        integral_1 = inv_alpha_1 * (-0.5 * ((D+N)*np.log(alpha_1) + np.sum(np.log(tau_1)) ) \
#                        + np.sum(norm.logcdf(np.sqrt(alpha_1)*tau_2_N/np.sqrt(tau_1_N)))) \
#                        + 0.5 * np.sum(np.power(tau_2, 2) / tau_1)
#    mat = A - np.diag(tau_1)
#    sign, logdet = np.linalg.slogdet(mat)
Example 24
import cvxpy as cp

# Create two scalar optimization variables.
x = cp.Variable()
y = cp.Variable()

# Create two constraints.
constraints = [cp.inv_pos(x) + cp.inv_pos(y) <= 1]
#constraints = [x + y >= 0]

# Form objective.
obj = cp.Minimize((x - y + 2)**2)

# Form and solve problem.
prob = cp.Problem(obj, constraints)
prob.solve()  # Returns the optimal value.
print("status:", prob.status)
print("optimal value", prob.value)
print("optimal var", x.value, y.value)

Example 25
def solve_problem_func(env, alpha_parameter):
    # ----------parameters----------
    I = env["I"]
    M = env["M"]
    h_ul = env["h_ul"]
    bandwidth_max = env["bandwidth_max"]
    tx_power_m_max = env["tx_power_m_max"]
    comp_bs = env["comp_bs"]
    comp_m = env["comp_m"]
    task = env["task"]
    data = env["data"]
    M_list = env["M_list"]
    time_scale = env["time_scale"]
    rate_m_itr = bandwidth_max * np.log2(1 + tx_power_m_max * h_ul)
    average_rate = bandwidth_max * np.log2(1 + tx_power_m_max * h_ul) / M
    alpha_v = alpha_parameter

    # ----------iteration variable parameter----------
    # this part is defined to use the iteration variable in the subproblem

    # ----------variables----------
    # define the variables
    # bandwidth allocation factor w
    omega = cp.Variable([M, I])
    # offloading factor alpha
    alpha = cp.Variable([M, I])
    # bs sever allocation factor beta
    beta = cp.Variable([M, I])
    eta = cp.Variable([M, I])
    mu = cp.Variable([M, I])

    # max delay T
    T = cp.Variable()

    # additional variables
    t1 = cp.Variable()
    t2 = cp.Variable()
    t3 = cp.Variable()

    # ----------problem formulation----------

    # --------objective function--------
    objective_func = T

    objective1 = cp.Minimize(objective_func)

    # --------constraints--------
    # offloading constraints
    c1 = [alpha >= 0, alpha <= 1]
    # bandwidth allocation constraints
    c4_5 = [cp.sum(omega) <= 1, omega >= (1e-7)]
    # bs server allocation constraints
    c2_3 = [cp.sum(beta) <= 1, beta >= (1e-7)]

    c6 = [cp.multiply(1 - alpha, task) / comp_m * time_scale <= t1]
    c7b = [
        -cp.log(t3) - cp.log(beta) + np.log2(alpha_v) +
        cp.multiply(1 / alpha_v / np.log(2),
                    (alpha - alpha_v)) + np.log2(task / comp_bs * time_scale)
        <= 0
    ]
    c8b = [
        -cp.log(t2) - cp.log(omega) + np.log2(alpha_v) + cp.multiply(
            1 / alpha_v / np.log(2),
            (alpha - alpha_v)) + np.log2(data / rate_m_itr * time_scale) <= 0
    ]
    #c8 = [cp.multiply(eta,task/comp_bs)*time_scale<=t3]
    #c7 = [cp.multiply ( mu, data / rate_m_itr)*time_scale<=t2]

    R1a2 = []
    for i in range(0, M):
        for j in range(0, I):

            R1a2 = R1a2 + [
                cp.norm(cp.vstack([2 * (alpha[i, j]), eta[i, j] - beta[i, j]]),
                        2) <= eta[i, j] + beta[i, j]
            ]
            #assert  R1a2[0].is_dqcp()

    R1a3 = [alpha <= 1e7 * beta + (1e-7) * eta - 1, alpha <= eta]

    R2a3 = [alpha <= 1e7 * omega + (1e-7) * mu - 1, alpha <= mu]

    R2a4 = [eta <= 1e7, mu <= 1e7]

    # local computing constraints R1
    #R1 = [cp.ceil(cp.multiply(alpha, task)/comp_m) <= T]
    R1 = [cp.ceil(cp.multiply(1 - alpha, task) / comp_m) - T <= 0]
    #R1 = [cp.ceil(alpha) - 10 <= 0]
    #assert R1[0].is_dqcp ()
    # offloading constraints R2
    R2a = cp.ceil(
        cp.multiply(cp.multiply(alpha, task / comp_bs), cp.inv_pos(beta)))
    R2b = cp.ceil(
        cp.multiply(
            cp.multiply(
                alpha,
                data / bandwidth_max * np.log2(1 + tx_power_m_max * h_ul)),
            cp.inv_pos(omega)))
    R2 = [R2a + R2b <= T]
    R2a1 = cp.ceil((cp.multiply(eta, task / comp_bs)))

    objective2 = cp.Minimize(
        1 / 2 * cp.max(cp.ceil(cp.multiply(alpha, task) / comp_m)))
    objective2 = cp.Minimize(
        cp.max(cp.ceil(cp.multiply(1 - alpha, task / beta / comp_bs))))

    obj3_part1 = 1 / 2 * cp.max(cp.ceil(cp.multiply(1 - alpha, task / comp_m)))
    obj3_part1 = 1 / 2 * cp.ceil(cp.max(cp.multiply(1 - alpha, task / comp_m)))
    obj3_part2 = 1 / 2 * cp.max(
        cp.ceil(cp.multiply(eta, task / comp_bs)) +
        cp.ceil(cp.multiply(mu, data / rate_m_itr)))
    obj3_part2 = 1 / 2 * cp.ceil(cp.max(cp.multiply(
        eta, task / comp_bs))) + 1 / 2 * cp.ceil(
            cp.max(cp.multiply(mu, data / rate_m_itr)))

    obj3_part3 = cp.abs(obj3_part1 - obj3_part2)
    objective3 = cp.Minimize(obj3_part1 + obj3_part2 + obj3_part3)

    #assert obj3_part1.is_dqcp ()
    #assert obj3_part1.is_dcp()
    #assert (cp.max(cp.ceil(cp.multiply(eta,task/comp_bs)))).is_dqcp()
    #assert (cp.max(cp.ceil(cp.multiply(eta,task/comp_bs)))).is_dcp()
    #assert (cp.ceil(cp.multiply(mu,data/rate_m_itr))).is_dqcp
    #assert (cp.ceil(alpha)+cp.ceil(beta)).is_dqcp()
    #assert obj3_part2.is_dqcp ()
    #assert obj3_part3.is_dqcp ()
    #assert objective3.is_dqcp ()

    t = cp.Variable([M, I])
    objective2 = cp.Minimize(cp.max(cp.ceil(cp.multiply(t, task / comp_bs))))
    c4 = [(1 - alpha) / beta <= t]

    objective6 = cp.Minimize(1 / 2 *
                             cp.ceil(t1 + t2 + t3 + cp.abs(t1 - t2 - t3)) +
                             3 / 2)

    rho = 1
    upsilon = 1
    varsigma = 1e-27
    obj7_func1 = 1 / 2 * cp.ceil(t1 + t2 + t3 + cp.abs(t1 - t2 - t3)) + 3 / 2
    obj7_func1 = 1 / 2 * (t1 + t2 + t3 + cp.abs(t1 - t2 - t3))
    obj7_func2 = cp.sum(
        cp.multiply(1 - alpha, varsigma * task * np.square(comp_m))) + cp.sum(
            tx_power_m_max * cp.multiply(mu, data / rate_m_itr))
    objective7 = cp.Minimize(rho * obj7_func1 + upsilon * obj7_func2)

    try:
        # ----------problem solve and results----------
        # problem6 = cp.Problem(objective6, c1+c2_3+c4_5+c6+c7+c8+R1a3+R2a3+R2a4)
        problem6 = cp.Problem(objective6, c1 + c2_3 + c4_5 + c6 + c7b + c8b)
        problem6.solve(qcp=True, verbose=True, solver=cp.ECOS)
        print()
        print("problem1 solve: ", problem6.value)
        print("alpha.value", alpha.value)
        print("beta.value", beta.value)
        print("omega.value", omega.value)

        np.ceil(data / rate_m_itr * M * time_scale)
        # ----------data collection and depict-----------
        plt.clf()
        plt.subplot(441)
        plt.title("user_rate in M/s")
        # plt.bar(x=M_list, height=(average_rate/1e6).reshape(M), width=1)
        plt.plot(M_list,
                 omega.value * rate_m_itr / 1e6,
                 '-*',
                 color='b',
                 label="optimized rate")
        plt.plot(M_list,
                 average_rate / 1e6,
                 '-o',
                 color='r',
                 label="average bandwidth allocation  rate")

        plt.legend()

        plt.subplot(442)
        plt.title("data transmitting delay")
        plt.plot(
            M_list,
            data / average_rate * time_scale,
            '-*',
            color='b',
            label=
            "full date transmitting delay with average bandwidth allocation")
        plt.plot(M_list, (alpha.value * data) / (omega.value * rate_m_itr) *
                 time_scale,
                 '-o',
                 color='r',
                 label="optimized transmitting delay")
        # plt.bar(x=M_list, height=np.ceil(data / average_rate*time_scale).reshape(M), width=1)
        plt.legend(fontsize='xx-small')

        plt.subplot(443)
        plt.title("local_computing_delay")
        bar_width = 0.3
        plt.bar(x=M_list,
                height=np.ceil(time_scale * task / comp_m).reshape(M),
                width=bar_width,
                label='full local computing delay')
        plt.bar(x=M_list + bar_width,
                height=np.ceil(time_scale * (1 - alpha.value) * task /
                               comp_m).reshape(M),
                width=bar_width,
                label='remain local computing delay')

        plt.plot(M_list,
                 problem6.value * np.ones(M),
                 'o',
                 color='m',
                 label="optimized delay")
        plt.legend(fontsize='xx-small')  # show the legend (label entries)
        plt.xticks(x=M_list +
                   bar_width / 2)  # place the x-axis tick labels midway between the paired bars

        plt.subplot(444)
        plt.title("offloading_computing_delay")

        bar_width = 0.3  # width of the bars
        plt.bar(x=M_list,
                height=(np.ceil(task / (comp_bs / M)) * time_scale).reshape(M),
                width=bar_width,
                label='average full offloading computing delay')
        plt.bar(x=M_list + bar_width,
                height=(np.ceil(task * alpha.value / (beta.value * comp_bs) *
                                time_scale)).reshape(M),
                width=bar_width,
                label='optimized offloading computing delay')

        # draw the grouped bar chart

        plt.legend()  # show the legend (label entries)
        plt.xticks(x=M_list +
                   bar_width / 2)  # place the x-axis tick labels midway between the paired bars

        plt.subplot(445)
        plt.title("optimized local computing_delay gain")
        plt.plot(M_list, (problem6.value -
                          np.ceil(task / comp_m * time_scale)).reshape(M),
                 'x',
                 color='r')
        plt.plot(M_list,
                 problem6.value * np.ones(M),
                 'o',
                 color='m',
                 label="optimized delay")
        plt.plot(M_list,
                 t1.value * np.ones(M),
                 'v',
                 color='g',
                 label="local computing delay t1")
        plt.plot(M_list,
                 t3.value * np.ones(M),
                 '^',
                 color='k',
                 label="offloading computing delay t3")
        plt.plot(M_list,
                 t2.value * np.ones(M),
                 '*',
                 color='b',
                 label="transmitting delay t2")
        plt.legend(fontsize='xx-small')

        plt.subplot(446)
        plt.title("optimized alpha beta")
        plt.plot(M_list, alpha.value, '-v', label='alpha')
        plt.plot(M_list, beta.value, '-x', label='beta')
        plt.plot(M_list, omega.value, '^', label='omega')
        plt.legend()

        plt.subplot(447)
        plt.title("optimized omega")
        plt.plot(M_list, omega.value, '-^')

        plt.subplot(448)
        plt.title("optimized beta")
        plt.plot(M_list, beta.value, '-^')
        '''
        plt.subplot(449)
        plt.title("optimized eta")
        plt.plot(M_list, eta.value,'-^')

        plt.subplot(4,4,10)
        plt.title("optimized mu")
        plt.plot(M_list, mu.value,'-^')

        plt.subplot(4,4,11)
        plt.title("recalculated omega")
        plt.plot(M_list, alpha.value/eta.value,'-^')

        plt.subplot(4,4,12)
        plt.title("recalculated beta")
        plt.plot(M_list, alpha.value/mu.value,'-^')
        '''

        print("func1.value", obj7_func1.value)
        print(
            "local computing energy",
            cp.sum(cp.multiply(1 - alpha,
                               varsigma * task * np.square(comp_m))).value)
        print(
            "offloading energy",
            cp.sum(tx_power_m_max * cp.multiply(mu, data / rate_m_itr)).value)
        print("func2.value", obj7_func2.value)

        problem_result = {
            "problem6.value": problem6.value,
            "alpha.value": alpha.value
        }

        return problem_result

    except cp.error.SolverError:
        # Problem.value cannot be assigned directly; report a zero objective instead.
        problem_result = {
            "problem6.value": 0,
            "alpha.value": alpha_v
        }

        return problem_result
Example 26
    obj = cvx.Minimize(0)
    constraints = [cvx.abs(Tpow*a - cvx.mul_elemwise(y, Tpow*cvx.vstack(1, b)))
                    <= mid * Tpow*cvx.vstack(1, b)]          
    prob = cvx.Problem(obj, constraints)
    sol = prob.solve(solver=cvx.CVXOPT)
    if prob.status == cvx.OPTIMAL:
        print('gamma = {}'.format(mid))
        u = mid
        a_opt = a
        b_opt = b
        objval_opt = mid
    else:
        l = mid
        
y_fit = cvx.mul_elemwise(Tpow*a_opt.value,
                         cvx.inv_pos(Tpow*cvx.vstack(1, b_opt.value)))
plt.figure(0)
plt.plot(t.A1, y.A1, label='y')
plt.plot(t.A1, y_fit.value.A1,'g-o', label='fit')
plt.xlabel('t')
plt.ylabel('y')
plt.title('A5.2: Fit vs. Exponential Function')
plt.legend(loc='lower right', frameon=False);
plt.figure(1)
plt.plot(t.A1,y_fit.value.A1-y.A1)
plt.xlabel('t')
plt.ylabel('error')
plt.title('A5.2: Fitting Error Plot')
print('a: {}'.format(a_opt.T.value))
print('b: {}'.format(b_opt.T.value))
print('optimal objective value: {}'.format(mid))
Example 27
 def test_noop_inv_pos_constr(self):
     x = cp.Variable()
     constr = [cp.inv_pos(cp.ceil(x)) >= -5]
     problem = cp.Problem(cp.Minimize(0), constr)
     problem.solve(SOLVER, qcp=True)
     self.assertEqual(problem.status, s.OPTIMAL)
Example 28
import numpy as np
import cvxpy as cp

from satisfy_some_constraints_data import m, n, k, A, b, c
epsilon = 1e-5

u = cp.Variable()
v = cp.Variable()
x = cp.Variable((A.shape[1], ))
f_x = A @ x - b
constraints = [
    cp.sum(cp.pos(f_x + u)) <= (m - k) * u,
    cp.inv_pos(v) <= u,
]
obj = cp.Minimize(c @ x)
p1 = cp.Problem(obj, constraints)
p1.solve()
if p1.status == 'optimal':
    lambda_value = 1 / u.value
    print(f'lambda: {lambda_value:.4f}')
    print(f'objective value: {p1.value:.4f}')
    f_values = np.matmul(A, x.value) - b
    satisfied = f_values <= epsilon
    satisfied_count = satisfied.sum()
    print(f"satisfied count: {satisfied_count}")
    #part b
    smallest_indexes = np.argsort(f_values)[:k]
    constraints = [A[smallest_indexes, :] @ x <= b[smallest_indexes]]
    p2 = cp.Problem(obj, constraints)
    p2.solve()
    print(f'Improved objective value: {p2.value:.4f}')
Example 29
 prox("NORM_2", lambda: cp.norm(X, "fro")),
 prox("NORM_2", lambda: cp.norm2(x)),
 prox("NORM_NUCLEAR", lambda: cp.norm(X, "nuc")),
 #prox("QUAD_OVER_LIN", lambda: cp.quad_over_lin(p, q1)),
 prox("SECOND_ORDER_CONE", None, C_soc_scaled),
 prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
 prox("SECOND_ORDER_CONE", None, C_soc_translated),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm(X, "fro") <= t]),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm2(x) <= t]),
 prox("SEMIDEFINITE", None, lambda: [X >> 0]),
 prox("SUM_DEADZONE", f_dead_zone),
 prox("SUM_EXP", lambda: cp.sum_entries(cp.exp(x))),
 prox("SUM_HINGE", f_hinge),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_INV_POS", lambda: cp.sum_entries(cp.inv_pos(x))),
 prox("SUM_KL_DIV", lambda: cp.sum_entries(cp.kl_div(p1,q1))),
 prox("SUM_LARGEST", lambda: cp.sum_largest(x, 4)),
 prox("SUM_LOGISTIC", lambda: cp.sum_entries(cp.logistic(x))),
 prox("SUM_NEG_ENTR", lambda: cp.sum_entries(-cp.entr(x))),
 prox("SUM_NEG_LOG", lambda: cp.sum_entries(-cp.log(x))),
 prox("SUM_QUANTILE", f_quantile),
 prox("SUM_QUANTILE", f_quantile_elemwise),
 prox("SUM_SQUARE", f_least_squares_matrix),
 prox("SUM_SQUARE", lambda: f_least_squares(20)),
 prox("SUM_SQUARE", lambda: f_least_squares(5)),
 prox("SUM_SQUARE", f_quad_form),
 prox("TOTAL_VARIATION_1D", lambda: cp.tv(x)),
 prox("ZERO", None, C_linear_equality),
 prox("ZERO", None, C_linear_equality_matrix_lhs),
 prox("ZERO", None, C_linear_equality_matrix_rhs),
Example 30
def solveOracle(SP, startingFund, c, c_0):
    result_M = []
    result_M.append(startingFund)
    result_P = []
    result_R = []
    compNum = len(SP[0])
    ini_P_value = 1.0 / (compNum + 1)
    ini_P = [ini_P_value] * compNum
    result_P.append(ini_P)
    # calculate the return at the beginning of the 2nd time period
    # ignore transaction cost for now
    R_1 = np.dot(np.divide(ini_P, SP[0]), np.subtract(SP[1], SP[0]))
    result_R.append(R_1)
    M_1 = startingFund * (1 + R_1)
    result_M.append(M_1)

    for i in xrange(1, (len(SP) - 1)):
        P_pre = result_P[-1]  #vector
        M_pre = result_M[-2]
        M = result_M[-1]
        S_pre = SP[i - 1]  #vector
        S = SP[i]
        S_next = SP[i + 1]
        # Variables:
        P = cvx.Variable(compNum)
        # Constraints:
        constraints = [0 <= P, P <= 1, sum(P) <= 1]
        # Form Objective:
        '''
		print 'P:',P
		print 'M_p:', M_pre
		print 'P_p:',P_pre
		print 'M:',M
		print 'S_pre:', S_pre
		print 'S:', S
		print '1/S:', cvx.inv_pos(S)
		'''
        term1 = cvx.mul_elemwise(np.subtract(S_next, S),
                                 cvx.mul_elemwise(cvx.inv_pos(S), P))
        absTerm = cvx.abs(
            cvx.mul_elemwise(cvx.inv_pos(S), P) -
            cvx.mul_elemwise(cvx.mul_elemwise(M_pre, P_pre),
                             cvx.inv_pos(cvx.mul_elemwise(M, S_pre))))
        term2 = cvx.mul_elemwise(S, cvx.mul_elemwise(c, absTerm))
        obj = cvx.Maximize(sum(term1 - term2) - c_0)
        # Form and solve problem:
        prob = cvx.Problem(obj, constraints)
        prob.solve()
        '''print prob.status'''
        result_P.append(P.value)
        R = prob.value
        result_R.append(R)
        M_next = M * (1 + R)
        result_M.append(M_next)
    #print result_M
    #print len(result_M)
    #print result_P
    return_accum = [0] * len(result_R)
    for i in xrange(len(result_R)):
        for j in xrange(i + 1):
            return_accum[i] += result_R[j]
    #print return_accum
    #print result_R
    #print result_M
    return result_R
Example 31
 prox("NORM_1", lambda: cp.norm1(x)),
 prox("NORM_2", lambda: cp.norm(X, "fro")),
 prox("NORM_2", lambda: cp.norm2(x)),
 prox("NORM_NUCLEAR", lambda: cp.norm(X, "nuc")),
 prox("SECOND_ORDER_CONE", None, C_soc_scaled),
 prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
 prox("SECOND_ORDER_CONE", None, C_soc_translated),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm(X, "fro") <= t]),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm2(x) <= t]),
 prox("SEMIDEFINITE", None, lambda: [X >> 0]),
 prox("SUM_DEADZONE", f_dead_zone),
 prox("SUM_EXP", lambda: cp.sum_entries(cp.exp(x))),
 prox("SUM_HINGE", f_hinge),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_INV_POS", lambda: cp.sum_entries(cp.inv_pos(x))),
 prox("SUM_KL_DIV", lambda: cp.sum_entries(cp.kl_div(p1,q1))),
 prox("SUM_LARGEST", lambda: cp.sum_largest(x, 4)),
 prox("SUM_LOGISTIC", lambda: cp.sum_entries(cp.logistic(x))),
 prox("SUM_NEG_ENTR", lambda: cp.sum_entries(-cp.entr(x))),
 prox("SUM_NEG_LOG", lambda: cp.sum_entries(-cp.log(x))),
 prox("SUM_QUANTILE", f_quantile),
 prox("SUM_QUANTILE", f_quantile_elemwise),
 prox("SUM_SQUARE", f_least_squares_matrix),
 prox("SUM_SQUARE", lambda: f_least_squares(20)),
 prox("SUM_SQUARE", lambda: f_least_squares(5)),
 prox("SUM_SQUARE", f_quad_form),
 prox("TOTAL_VARIATION_1D", lambda: cp.tv(x)),
 prox("ZERO", None, C_linear_equality),
 prox("ZERO", None, C_linear_equality_matrix_lhs),
 prox("ZERO", None, C_linear_equality_matrix_rhs),
Example 32
x=Variable (m,1)
objective = Minimize(normInf(matrix(A)*x-ones((n,1))))
constraints =[x>=0,x<=1]
pro = Problem(objective, constraints) 
result = pro.solve()
print x.value
val_ls_chev = np.max(np.abs(log(A*matrix(x.value))))
print val_ls_chev

#solution 5 cvxpy

from cvxpy import max
y=Variable (m,1)
Am = matrix(A)
qq = [max(Am[i,:]*y,inv_pos(Am[i,:]*y)) for i in range(n)]
objective1 = Minimize(max(*qq))
constraints1 =[y>=0,y<=1]
pro1 = Problem(objective1, constraints1) 
result1 = pro1.solve()
print y.value
val_ls_cvx = np.max(np.abs(log(A*matrix(y.value))))
print val_ls_cvx

#solution 6 cvxpy equivalent

z=Variable (m,1)
u = Variable(1)
qq = [(max(Am[i,:]*z,inv_pos(Am[i,:]*z))<=u) for i in range(n)]
objective2 = Minimize(u)
pp =[z>=0,z<=1]
Example 33
    def get_allocation(self, unflattened_throughputs, scale_factors,
                       unflattened_priority_weights, times_since_start,
                       num_steps_remaining, cluster_spec):
        throughputs, index = super().flatten(unflattened_throughputs,
                                             cluster_spec)
        if throughputs is None:
            self._isolated_throughputs_prev_iteration = {}
            self._num_steps_remaining_prev_iteration = {}
            return None
        (m, n) = throughputs.shape
        (job_ids, worker_types) = index

        # Row i of scale_factors_array is the scale_factor of job i
        # repeated len(worker_types) times.
        scale_factors_array = self.scale_factors_array(scale_factors, job_ids,
                                                       m, n)

        # TODO: Do something with these priority_weights.
        priority_weights = np.array(
            [1. / unflattened_priority_weights[job_id] for job_id in job_ids])

        # Create allocation variable, and isolated allocation.
        x = cp.Variable(throughputs.shape)
        isolated_throughputs = self._isolated_policy.get_throughputs(
            throughputs, index, scale_factors, cluster_spec)
        expected_time_fractions = []
        for i in range(len(job_ids)):
            if job_ids[i] not in self._cumulative_isolated_time:
                self._cumulative_isolated_time[job_ids[i]] = 0
            if job_ids[i] in self._num_steps_remaining_prev_iteration:
                self._cumulative_isolated_time[job_ids[i]] += (
                    self._num_steps_remaining_prev_iteration[job_ids[i]] -
                    num_steps_remaining[job_ids[i]]) / \
                    self._isolated_throughputs_prev_iteration[job_ids[i]]

            allocation_throughput = cp.sum(cp.multiply(throughputs[i], x[i]))
            expected_time_isolated = self._cumulative_isolated_time[job_ids[i]] + \
                (num_steps_remaining[job_ids[i]] / isolated_throughputs[i])
            expected_time_allocation = times_since_start[job_ids[i]] + \
                (num_steps_remaining[job_ids[i]] * cp.inv_pos(allocation_throughput))
            expected_time_fraction = expected_time_allocation / expected_time_isolated
            expected_time_fractions.append(expected_time_fraction)
        if len(expected_time_fractions) == 1:
            objective = cp.Minimize(expected_time_fractions[0])
        else:
            objective = cp.Minimize(cp.maximum(*expected_time_fractions))

        # Make sure that the allocation can fit in the cluster.
        constraints = self.get_base_constraints(x, scale_factors_array)

        cvxprob = cp.Problem(objective, constraints)
        result = cvxprob.solve(solver=self._solver)

        if cvxprob.status != "optimal":
            print('WARNING: Allocation returned by policy not optimal!')

        self._num_steps_remaining_prev_iteration = copy.copy(
            num_steps_remaining)
        self._isolated_throughputs_prev_iteration = {}
        for i in range(m):
            self._isolated_throughputs_prev_iteration[job_ids[i]] = \
                isolated_throughputs[i]

        if x.value is None:
            return self._isolated_policy.get_allocation(
                unflattened_throughputs, scale_factors, cluster_spec)
        return super().unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
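
A self-contained sketch (made-up throughputs; rows of x are jobs, columns are worker types) of the finish-time-fairness objective built above: minimize the worst-case ratio of expected completion time under the allocation to completion time in isolation:

import numpy as np
import cvxpy as cp

throughputs = np.array([[2.0, 1.0],
                        [1.5, 3.0]])        # jobs x worker types (made up)
isolated = np.array([1.0, 1.5])             # isolated throughputs (made up)
steps_remaining = np.array([100.0, 200.0])

x = cp.Variable(throughputs.shape, nonneg=True)
ratios = []
for i in range(2):
    alloc_tput = cp.sum(cp.multiply(throughputs[i], x[i]))
    expected_time = steps_remaining[i] * cp.inv_pos(alloc_tput)
    ratios.append(expected_time / (steps_remaining[i] / isolated[i]))

prob = cp.Problem(cp.Minimize(cp.maximum(*ratios)),
                  [cp.sum(x, axis=0) <= 1, x <= 1])
prob.solve()
print(np.round(x.value, 3))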
Example 34
    def get_allocation(self, unflattened_throughputs, scale_factors,
                       unflattened_priority_weights, times_since_start,
                       num_steps_remaining, cluster_spec):
        all_throughputs, index = \
            self.flatten(d=unflattened_throughputs,
                         cluster_spec=cluster_spec,
                         priority_weights=unflattened_priority_weights)
        if all_throughputs is None or len(all_throughputs) == 0:
            self._isolated_throughputs_prev_iteration = {}
            self._num_steps_remaining_prev_iteration = {}
            return None

        (m, n) = all_throughputs[0].shape
        (job_ids, single_job_ids, worker_types, relevant_combinations) = index
        x = cp.Variable((m, n))

        # Row i of scale_factors_array is the scale_factor of job
        # combination i repeated len(worker_types) times.
        scale_factors_array = self.scale_factors_array(scale_factors, job_ids,
                                                       m, n)

        throughputs_no_packed_jobs = np.zeros((len(single_job_ids), n))
        for i, single_job_id in enumerate(single_job_ids):
            for j, worker_type in enumerate(worker_types):
                throughputs_no_packed_jobs[i, j] = \
                    unflattened_throughputs[single_job_id][worker_type]
        isolated_throughputs = self._isolated_policy.get_throughputs(
            throughputs_no_packed_jobs, (single_job_ids, worker_types),
            scale_factors, cluster_spec)

        single_throughputs = np.zeros((len(single_job_ids), n))
        expected_time_fractions = []
        for i in range(len(all_throughputs)):
            if single_job_ids[i] not in self._cumulative_isolated_time:
                self._cumulative_isolated_time[single_job_ids[i]] = 0
            if single_job_ids[i] in self._num_steps_remaining_prev_iteration:
                self._cumulative_isolated_time[single_job_ids[i]] += (
                    self._num_steps_remaining_prev_iteration[single_job_ids[i]] -
                    num_steps_remaining[single_job_ids[i]]) / \
                    self._isolated_throughputs_prev_iteration[single_job_ids[i]]

            indexes = relevant_combinations[single_job_ids[i]]
            isolated_throughput = isolated_throughputs[i]
            allocation_throughput = cp.sum(
                cp.multiply(all_throughputs[i][indexes], x[indexes]))
            expected_time_isolated = self._cumulative_isolated_time[single_job_ids[i]] + \
                (num_steps_remaining[single_job_ids[i]] / isolated_throughput)
            expected_time_allocation = times_since_start[single_job_ids[i]] + \
                (num_steps_remaining[single_job_ids[i]] * cp.inv_pos(allocation_throughput))
            expected_time_fraction = expected_time_allocation / expected_time_isolated
            expected_time_fractions.append(expected_time_fraction)
        if len(expected_time_fractions) == 1:
            objective = cp.Minimize(expected_time_fractions[0])
        else:
            objective = cp.Minimize(cp.maximum(*expected_time_fractions))

        # Make sure the allocation can fit in the cluster.
        constraints = self.get_base_constraints(x, single_job_ids,
                                                scale_factors_array,
                                                relevant_combinations)

        # Explicitly constrain all allocation values with an effective scale
        # factor of 0 to be 0.
        # NOTE: This is not strictly necessary because these allocation values
        # do not affect the optimal allocation for nonzero scale factor
        # combinations.
        for i in range(m):
            for j in range(n):
                if scale_factors_array[i, j] == 0:
                    constraints.append(x[i, j] == 0)
        cvxprob = cp.Problem(objective, constraints)
        result = cvxprob.solve(solver=self._solver)

        if cvxprob.status != "optimal":
            print('WARNING: Allocation returned by policy not optimal!')

        self._num_steps_remaining_prev_iteration = copy.copy(
            num_steps_remaining)
        self._isolated_throughputs_prev_iteration = {}
        for i in range(len(all_throughputs)):
            self._isolated_throughputs_prev_iteration[single_job_ids[i]] = \
                isolated_throughputs[i]

        return self.unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
Example 35
 def test_infeasible_inv_pos_constr(self):
     x = cp.Variable(nonneg=True)
     constr = [cp.inv_pos(cp.ceil(x)) <= -5]
     problem = cp.Problem(cp.Minimize(0), constr)
     problem.solve(SOLVER, qcp=True)
     self.assertEqual(problem.status, s.INFEASIBLE)
Example 36
       Rij.transpose()) / 2.  # R1+R2 combined radii (min distance) matrix
Iij = (Iij + Iij.transpose()) / 2.  # Intensity matrix
Iij[Iij < 1] = 1
Iij[20, 15] = 100000
Rij[Rij < 1e-6] = 1e-6
Iij = Iij / 1000

c = cvx.Variable(dim * dim)
constr = []
# for i in range(dim):
#     for j in range(i, dim):
#         constr.append(c[i, j] >= Rij[i, j])
for i in range(dim * dim):
    constr.append(c[i] >= Rij.flatten()[i])
prob = cvx.Problem(
    cvx.Maximize(cvx.norm(cvx.multiply(Iij.flatten(), cvx.inv_pos(c)), 1)),
    constr)
#prob = cvx.Problem(cvx.Minimize(cvx.max(cvx.max(cvx.abs(cvx.multiply(cvx.inv_pos(cvx.multiply(1./Iij.flatten(), c)), 1/100))))), constr)
prob.solve(method="dccp", solver=cvx.MOSEK, verbose=True)

rij_opt = np.reshape(c.value, (dim, dim))

r = rij[0, 1] / rij_opt[0, 1]
for i, j in product(range(dim), range(dim)):
    if i >= j: continue
    print("d[%d,%d]: %1.2f(true) %1.2f(sol.)" %
          (i, j, rij[i, j], rij_opt[i, j] * r))

df['opt. distance'] = df.index.size * [0]
for i1 in range(dim):
    for i2 in range(dim):
Example 37
val_ls_reg = np.max(np.abs(log(A * matrix(p_ls_reg))))
print p_ls_reg
print val_ls_reg

###########################################################################################
# solution 4
# chebshev approximation
p_chev = cp.Variable(m)
objective = cp.Minimize(cp.norm(A * p_chev - ones((n, 1)), "inf"))
constraints = [p_chev <= 1, p_chev >= 0]
p4 = cp.Problem(objective, constraints)
result = p4.solve()
f_4 = np.max(np.abs(log(A * matrix(p_chev.value))))
print p_chev.value
print f_4


###########################################################################################
# solution 5
# cvxpy
u = cp.Variable(1)
p_cp = cp.Variable(m)
objective = cp.Minimize(u)
constraints = [cp.max(matrix(A[i, :]) * p_cp, cp.inv_pos(matrix(A[i, :]) * p_cp)) <= u for i in range(n)]
constraints.extend([p_cp <= 1, p_cp >= 0])
p5 = cp.Problem(objective, constraints)
result = p5.solve()
f_5 = np.max(np.abs(log(A * matrix(p_cp.value))))
print p_cp.value
print f_5
Example 38
#fixing data
d = np.asarray(d)
smin = np.asarray(smin)
smax = np.asarray(smax)
tau_min = np.asarray(tau_min)
tau_max = np.asarray(tau_max)

d = d[:, 0]
smin = smin[:, 0]
smax = smax[:, 0]
tau_min = tau_min[:, 0]
tau_max = tau_max[:, 0]

k = cp.Variable(n)
h = cp.cumsum(k)
phi = a * cp.multiply(cp.inv_pos(k), d**2) + c * k + cp.multiply(b, d)
obj = cp.Minimize(cp.sum(phi))
constraints = [
    cp.multiply(smin, k) <= d,
    cp.multiply(smax, k) >= d,
    tau_min <= h,
    tau_max >= h,
]
problem = cp.Problem(obj, constraints)
problem.solve()
print(f"status: {problem.status}")
if problem.status == 'optimal':
    print(f"Total fuel: {problem.value}")
    s = d / k.value
    plt.step(np.arange(n), s)
    plt.show()
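
For context (a reading of the model above, not text from the original): with k the per-segment travel time and speed s = d/k, a fuel rate of a*s^2 + b*s + c per unit time integrates over a segment to a*d^2/k + b*d + c*k, which is exactly phi; writing the d^2/k term with inv_pos keeps it convex in k. A scalar identity check with made-up numbers:

a_, b_, c_ = 1.0, 2.0, 3.0     # illustrative fuel-model coefficients
d_, k_ = 10.0, 4.0             # segment length and travel time
s_ = d_ / k_
print((a_ * s_ ** 2 + b_ * s_ + c_) * k_)
print(a_ * d_ ** 2 / k_ + b_ * d_ + c_ * k_)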