Example no. 1
def one_shot_compressed_sensing_decode(x0, y, p_matrix, delta):
    _size = y.shape[0]
    _app = p_matrix.shape[0]
    use = []
    use.append(x0)
    X = cvx.Variable((_app))
    #print(k)
    objective = cvx.Minimize(cvx.norm(X, 1))
    #if abs(k)>p_matrix[-1]:
    #    constraints = [(X.T*p_matrix-k)<=delta, (X.T*p_matrix-k)>=-delta]
    #else:
    #constraints = [(X.T*p_matrix-k)<=delta, (X.T*p_matrix-k)>=-delta,cvx.max(X)<=1,cvx.min(X)>=-1]
    constraints = [(X.T * p_matrix - y) <= delta,
                   (X.T * p_matrix - y) >= -delta,
                   cvx.max(X) <= 1,
                   cvx.min(X) >= -1]
    prob = cvx.Problem(objective, constraints)
    prob.solve(solver=cvx.ECOS_BB)
    if X.value is None:
        X = cvx.Variable((_app))
        #constraints = [(X.T*p_matrix-k)<=20, (X.T*p_matrix-k)>=-20,cvx.max(X)<=1,cvx.min(X)>=-1]
        #constraints = [(X.T*p_matrix-k)<=20, (X.T*p_matrix-k)>=-20]
        constraints = [cvx.max(X) <= 1, cvx.min(X) >= -1]
        prob = cvx.Problem(objective, constraints)
        prob.solve(solver=cvx.ECOS_BB)
        if X.value is None:
            return []
    print(X.value)
    return np.abs(x0 - X.value)
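
The snippet above leaves the `cvx`/`np` imports and the sensing-matrix layout implicit. Below is a self-contained sketch of the same L1 decoding step, under the assumption that `p_matrix` has one row per signal entry and one column per measurement (so `x0 @ p_matrix` produces `y`); all names and sizes are illustrative, not taken from the original code.

import numpy as np
import cvxpy as cvx

rng = np.random.default_rng(0)
n, m = 50, 25                              # signal length, number of measurements (made up)
p_matrix = rng.standard_normal((n, m))
x0 = np.zeros(n)
x0[rng.choice(n, size=5, replace=False)] = rng.choice([-1.0, 1.0], size=5)
delta = 0.05
y = x0 @ p_matrix + rng.uniform(-delta, delta, size=m)

X = cvx.Variable(n)
objective = cvx.Minimize(cvx.norm(X, 1))
constraints = [X @ p_matrix - y <= delta,   # measurements fit within the noise band
               X @ p_matrix - y >= -delta,
               cvx.max(X) <= 1,             # entries known to lie in [-1, 1]
               cvx.min(X) >= -1]
cvx.Problem(objective, constraints).solve()
print("reconstruction error:", np.abs(x0 - X.value).sum())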
Example no. 2
def max_minimum_allocation(instance) -> Allocation:
    """
    Find the max-minimum (aka Egalitarian) allocation.
    :param instance: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.

    :return allocation_matrix:  a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the leximin vector of utilities.
    >>> a = max_minimum_allocation([ [3] , [5] ])   # single item
    >>> a
    Agent #0 gets { 62.5% of 0} with value 1.88.
    Agent #1 gets { 37.5% of 0} with value 1.88.
    <BLANKLINE>
    >>> a.matrix
    [[0.625]
     [0.375]]
    >>> max_minimum_allocation([ [4,2] , [1,4] ]).round(3).matrix   # two different items
    [[1. 0.]
     [0. 1.]]
    >>> alloc = max_minimum_allocation([ [3,3] , [1,1] ]).round(3).matrix   # two identical items
    >>> [sum(alloc[i]) for i in alloc.agents()]
    [0.5, 1.5]
    >>> v = [ [4,2] , [1,3] ]   # two different items
    >>> a = max_minimum_allocation(v).round(3)
    >>> a.matrix
    [[0.8 0. ]
     [0.2 1. ]]
    >>> print(a.utility_profile())
    [3.2 3.2]
    """
    return max_welfare_allocation(instance,
        welfare_function=lambda utilities: cvxpy.min(cvxpy.hstack(utilities)),
        welfare_constraint_function=lambda utility: utility >= 0)
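
`max_welfare_allocation` and `Allocation` come from the surrounding library and are not shown here. As a rough standalone illustration of what the egalitarian objective amounts to, the sketch below solves the same max-min LP directly in cvxpy; the value matrix and the expected allocation are taken from the docstring above.

import numpy as np
import cvxpy

v = np.array([[4.0, 2.0], [1.0, 3.0]])        # v[i][j]: value of agent i for object j
alloc = cvxpy.Variable(v.shape, nonneg=True)  # alloc[i][j]: fraction of object j given to agent i
utilities = cvxpy.sum(cvxpy.multiply(v, alloc), axis=1)
problem = cvxpy.Problem(
    cvxpy.Maximize(cvxpy.min(utilities)),     # egalitarian (max-min) welfare
    [cvxpy.sum(alloc, axis=0) == 1])          # every object is fully allocated
problem.solve()
print(np.round(alloc.value, 3))               # expected roughly [[0.8, 0.], [0.2, 1.]]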
Example no. 3
    def __init__(self, K, J, **kwargs):
        super().__init__(K, J, **kwargs)
        self.F = cvx.Variable((self.K, self.J), nonneg=True)
        self.Fk = cvx.Parameter((self.K, self.J), nonneg=True)
        self.Pk = cvx.Parameter((self.K, self.J), nonneg=True)
        self.Pmax = cvx.Parameter(nonneg=True)
        self.N = cvx.Parameter(nonneg=True)
        self.df = cvx.Parameter(nonneg=True)
        self.rho = cvx.Parameter((self.K, self.J), nonneg=True)
        self.lam = cvx.Parameter(nonneg=True)

        self.obj = 0
        for k in range(self.K):
            num = 0
            for j in range(self.J):
                num += self.rho[k, j] * self.Pk[k, j] * self.F[k, j]
            self.obj += cvx.log(1 + num) - (cvx.max(self.rho[k, :]) -
                                            cvx.min(self.rho[k, :])) / 2

        constraints = [  #cvx.norm(self.F - self.Fk, 1) <= self.delta,
            cvx.sum(self.F, axis=0) <= self.N,
            cvx.sum(self.F, axis=0) >= 1,
            cvx.sum(self.F, axis=1) <= self.df, self.F <= 1
        ]
        for j in range(self.J):
            for k in range(self.K):
                self.obj += self.lam * ((self.Fk[k, j]**2 - self.Fk[k, j]) +
                                        (2 * self.Fk[k, j] - 1) *
                                        (self.F[k, j] - self.Fk[k, j]))
            constraints.append(self.Pk[:, j] * self.F[:, j] <= self.Pmax)
            constraints.append(self.Pk[:, j] * self.F[:, j] >= 0)

        self.prob = cvx.Problem(cvx.Maximize(self.obj), constraints)
Example no. 4
def getIndirectUtil(valuation, prices, budget, utility = "linear", rho = None):
    """
    Given a vector of consumer valuations, v, a price vector, p, and a budget, compute the utility
    of a utility-maximizing bundle. For the linear case this amounts to solving the linear program:
    max_{x} xv
    s.t. xp <= budget
    :param valuation: a consumer's valuation for goods.
    :param prices: prices of goods.
    :param budget: the consumer's budget.
    :param utility: utility model: "linear", "leontief", "cobb-douglas", or "ces" (anything else falls back to a quasi-linear objective).
    :param rho: CES exponent, used only when utility == "ces".
    :return: the maximum utility attainable within the budget.
    """
  
    num_items = len(valuation)
    x = cp.Variable(num_items)
    
    if (utility == "linear"):
        obj = cp.Maximize(x.T @ valuation)
    elif (utility == "leontief"):
        obj = cp.Maximize( cp.min( cp.multiply(x, 1/valuation) ) )
    elif (utility == "cobb-douglas"):
        obj = cp.Maximize( cp.sum(cp.multiply(valuation, cp.log(x))))
    elif (utility == "ces"):
        x_rho = cp.power(x, rho)
        util = valuation.T @ x_rho
        obj = cp.Maximize((1/rho)*cp.log(util))
    else:
        obj = cp.Maximize(x.T @ (valuation - prices))

    constraints = [ (x.T @ prices) <= budget,
                    x >= 0]
    prob = cp.Problem(obj, constraints)

    return prob.solve()
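
A hypothetical call, assuming `numpy` is available as `np` and the `cvxpy` import (`cp`) used by the function is in scope; the numbers are made up.

import numpy as np

valuation = np.array([3.0, 1.0, 2.0])
prices = np.array([1.0, 1.0, 1.0])
budget = 2.0
# With linear utility the buyer spends the whole budget on the good with the
# best value-per-dollar ratio, so the optimum should be 3 * 2 = 6.
print(getIndirectUtil(valuation, prices, budget, utility="linear"))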
Example no. 5
def one_shot_compressed_sensing_decode(x0,y,p_matrix,delta):
    _app = p_matrix.shape[0]
    #print(_app)
    X = cvx.Variable((_app))
    #print(k)
    objective = cvx.Minimize(cvx.norm(X,1))
    #print(y)
    #if abs(k)>p_matrix[-1]:
    #    constraints = [(X.T*p_matrix-k)<=delta, (X.T*p_matrix-k)>=-delta]
    #else:
    #constraints = [(X.T*p_matrix-k)<=delta, (X.T*p_matrix-k)>=-delta,cvx.max(X)<=1,cvx.min(X)>=-1]
    constraints = [(X.T*p_matrix-y)<=delta, (X.T*p_matrix-y)>=-delta,cvx.max(X)<=1,cvx.min(X)>=0]
    prob = cvx.Problem(objective, constraints)
    prob.solve(solver=cvx.ECOS_BB)
    if X.value is None:
        #print("error")
        #print(x0.shape[0])
        return x0.shape[0]
        '''
        X = cvx.Variable((_app))
        #constraints = [(X.T*p_matrix-k)<=20, (X.T*p_matrix-k)>=-20,cvx.max(X)<=1,cvx.min(X)>=-1]
        #constraints = [(X.T*p_matrix-k)<=20, (X.T*p_matrix-k)>=-20]
        constraints = [cvx.max(X)<=1,cvx.min(X)>=-1]
        prob = cvx.Problem(objective, constraints)
        prob.solve(solver=cvx.ECOS_BB)
        if X.value is None:
            return []
        '''
    #print(X.value)
    return np.sum(np.abs(x0-X.value))
Example no. 6
    def test_min(self):
        x = cp.Variable(pos=True)
        y = cp.Variable(pos=True)

        alpha = cp.Parameter(pos=True, value=1.0, name='alpha')
        beta = cp.Parameter(pos=True, value=3.0, name='beta')
        prod1 = x * y**alpha
        prod2 = beta * x * y**alpha
        posy = prod1 + prod2
        obj = cp.Maximize(cp.min(cp.hstack([prod1, prod2, 1 / posy])))
        constr = [x == alpha, y == 4.0]

        dgp = cp.Problem(obj, constr)
        dgp.solve(SOLVER, gp=True, enforce_dpp=True)
        # prod1 = 1*4 = 4, prod2 = 3*4 = 12, 1/posy = 1/(4 + 12)
        self.assertAlmostEqual(dgp.value, 1.0 / (4.0 + 12.0))
        self.assertAlmostEqual(x.value, 1.0)
        self.assertAlmostEqual(y.value, 4.0)

        alpha.value = 2.0
        # prod1 = 2*16, prod2 = 3*2*16 = 96, 1/posy = 1/(32 +96)
        dgp.solve(SOLVER, gp=True, enforce_dpp=True)
        self.assertAlmostEqual(dgp.value, 1.0 / (32.0 + 96.0))
        self.assertAlmostEqual(x.value, 2.0)
        self.assertAlmostEqual(y.value, 4.0)
Example no. 7
    def _optimize(self, optimize_array, solver=cp.SCS):
        """
        Calculates weights that maximize returns over the given array.

        :param optimize_array: (np.array) Relative returns of the assets for a given time period.
        :param solver: (cp.solver) Solver for cvxpy
        :return: (np.array) Weights that maximize the returns for the given array.
        """

        # Initialize weights for the optimization problem.
        weights = cp.Variable(self.number_of_assets)

        # Use cp.log and cp.sum to make the cost function a convex function.
        # Multiplying continuous returns equates to summing over the log returns.
        portfolio_return = cp.sum(cp.log(optimize_array @ weights))

        # Optimization objective and constraints.
        allocation_objective = cp.Maximize(portfolio_return)
        allocation_constraints = [cp.sum(weights) == 1, cp.min(weights) >= 0]

        # Define and solve the problem.
        problem = cp.Problem(objective=allocation_objective,
                             constraints=allocation_constraints)

        # Solve and return the resulting weights.
        problem.solve(warm_start=True, solver=solver)
        return weights.value
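
Because `_optimize` relies on class state (`self.number_of_assets`), here is a standalone sketch of the same log-growth portfolio problem with made-up relative returns; only `numpy` and `cvxpy` are assumed.

import numpy as np
import cvxpy as cp

optimize_array = np.array([[1.02, 0.98],
                           [0.99, 1.03],
                           [1.01, 1.00]])      # rows: periods, columns: assets (illustrative)
weights = cp.Variable(optimize_array.shape[1])
portfolio_return = cp.sum(cp.log(optimize_array @ weights))
problem = cp.Problem(cp.Maximize(portfolio_return),
                     [cp.sum(weights) == 1,
                      weights >= 0])           # equivalent to cp.min(weights) >= 0 above
problem.solve()
print(np.round(weights.value, 3))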
Example no. 8
    def test_min(self):
        """Test min.
        """
        # One arg, test sign.
        self.assertEqual(cp.min(1).sign, s.NONNEG)
        self.assertEqual(cp.min(-2).sign, s.NONPOS)
        self.assertEqual(cp.min(Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.min(0).sign, s.ZERO)

        # Test with axis argument.
        self.assertEqual(cp.min(Variable(2), axis=0).shape, tuple())
        self.assertEqual(cp.min(Variable(2), axis=1).shape, (2, ))
        self.assertEqual(cp.min(Variable((2, 3)), axis=0).shape, (3, ))
        self.assertEqual(cp.min(Variable((2, 3)), axis=1).shape, (2, ))

        # Invalid axis.
        with self.assertRaises(Exception) as cm:
            cp.min(self.x, axis=4)
        self.assertEqual(str(cm.exception), "Invalid argument for axis.")
Example no. 9
    def test_min(self) -> None:
        x = cp.Variable(2)
        expr = cp.min(cp.ceil(x))
        problem = cp.Problem(cp.Maximize(expr),
                             [x[0] >= 11.9, x[0] <= 15.8, x[1] >= 17.4])
        self.assertTrue(problem.is_dqcp())
        problem.solve(SOLVER, qcp=True)
        self.assertAlmostEqual(problem.objective.value, 16.0)
        self.assertLess(x[0].value, 16.0)
        self.assertGreater(x[0].value, 14.9)
        self.assertGreater(x[1].value, 17.3)
Example no. 10
def get_optimal_weights(M):
	
	if not np.all(np.linalg.eigvals(M) > 0):
		w, v	= np.linalg.eig(M)
		w	= w.clip(min=1e-6)
		M	= np.dot(np.dot(v, np.diag(w)), v.T)
	
	W		= cvx.Variable(shape=(M.shape[0], 1))
	constraints	= [cvx.sum(W)==1, cvx.min(W)>=0]
	obj		= cvx.Minimize(cvx.quad_form(W, M))
	prob		= cvx.Problem(obj, constraints)
	prob.solve(solver=cvx.ECOS)
	
	weights		= [W.value[i,0] for i in range(M.shape[0])]
	return weights
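
Illustrative call, assuming `numpy as np`, `cvxpy as cvx`, and the function above are in scope, and that the ECOS solver it requests is installed; `M` is a made-up 2x2 covariance matrix.

import numpy as np

M = np.array([[0.04, 0.01],
              [0.01, 0.09]])
# Long-only minimum-variance weights, summing to 1.
print(get_optimal_weights(M))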
Example no. 11
    def test_min(self):
        x = cvxpy.Variable(pos=True)
        y = cvxpy.Variable(pos=True)

        prod1 = x * y**0.5
        prod2 = 3.0 * x * y**0.5
        posy = prod1 + prod2
        obj = cvxpy.Maximize(cvxpy.min(cvxpy.hstack([prod1, prod2, 1 / posy])))
        constr = [x == 1.0, y == 4.0]

        dgp = cvxpy.Problem(obj, constr)
        dgp.solve(SOLVER, gp=True)
        self.assertAlmostEqual(dgp.value, 1.0 / (2.0 + 6.0), places=4)
        self.assertAlmostEqual(x.value, 1.0)
        self.assertAlmostEqual(y.value, 4.0)
Example no. 12
def ms_gd_leontief(valuations,
                   budgets,
                   prices_0,
                   learning_rate,
                   num_iters,
                   decay=True):
    prices = prices_0
    prices_hist = []
    demands_hist = []
    for iter_outer in range(1, num_iters):
        if (not iter_outer % 50):
            print(f" ----- Iteration {iter_outer}/{num_iters} ----- ")

        prices_hist.append(prices)
        demands = np.zeros(valuations.shape)

        X = cp.Variable(valuations.shape)
        obj = cp.Maximize(
            np.sum(prices) +
            budgets.T @ cp.log(cp.min(cp.multiply(X,
                                                  (1 / valuations)), axis=1)))
        constr = [X >= 0, X @ prices <= budgets]
        prob = cp.Problem(obj, constr)

        try:
            prob.solve(solver="ECOS")
        except cp.SolverError:
            prob.solve(solver="SCS")

        demands = X.value

        demands = demands.clip(min=0)
        demands_hist.append(demands)
        demand = np.sum(demands, axis=0)
        excess_demand = demand - 1

        if (decay):
            step_size = learning_rate * iter_outer**(-1 / 2) * excess_demand
            prices += step_size * ((prices) > 0)
        else:
            step_size = learning_rate * excess_demand
            prices += step_size * ((prices) > 0)

        prices = prices.clip(min=0.00001)

    return (demands, prices, demands_hist, prices_hist)
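
A hypothetical run on a tiny two-buyer, two-good Leontief market, assuming `numpy as np` and `cvxpy as cp` are imported next to the function above; all numbers are made up. Note that the routine updates `prices_0` in place and normalizes the supply of every good to 1.

import numpy as np

valuations = np.array([[1.0, 2.0],
                       [2.0, 1.0]])    # valuations[i, j]: weight of buyer i on good j
budgets = np.array([1.0, 1.0])
prices_0 = np.ones(2)
demands, prices, _, _ = ms_gd_leontief(valuations, budgets, prices_0,
                                       learning_rate=0.5, num_iters=100)
print("final prices:", np.round(prices, 3))
print("final demands:", np.round(demands, 3))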
Example no. 13
    def get_allocation(self, unflattened_throughputs, scale_factors,
                       unflattened_priority_weights, cluster_spec):
        throughputs, index = super().flatten(unflattened_throughputs,
                                             cluster_spec)
        if throughputs is None: return None
        (m, n) = throughputs.shape
        (job_ids, worker_types) = index

        # Row i of scale_factors_array is the scale_factor of job i
        # repeated len(worker_types) times.
        scale_factors_array = self.scale_factors_array(scale_factors, job_ids,
                                                       m, n)

        priority_weights = np.array(
            [1. / unflattened_priority_weights[job_id] for job_id in job_ids])

        proportional_throughputs = self._proportional_policy.get_throughputs(
            throughputs, index, cluster_spec)
        priority_weights = np.multiply(
            priority_weights.reshape((m, 1)),
            1.0 / proportional_throughputs.reshape((m, 1)))

        x = cp.Variable(throughputs.shape)
        # Multiply throughputs by scale_factors to ensure that scale_factor
        # is taken into account while allocating times to different jobs.
        # A job run on 1 GPU should receive `scale_factor` more time than
        # a job run on `scale_factor` GPUs if throughputs are equal.
        objective = cp.Maximize(
            cp.min(
                cp.sum(cp.multiply(
                    np.multiply(throughputs * priority_weights.reshape((m, 1)),
                                scale_factors_array), x),
                       axis=1)))
        # Make sure that the allocation can fit in the cluster.
        constraints = self.get_base_constraints(x, scale_factors_array)
        cvxprob = cp.Problem(objective, constraints)
        result = cvxprob.solve(solver=self._solver)

        if cvxprob.status != "optimal":
            print('WARNING: Allocation returned by policy not optimal!')

        return super().unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
Example no. 14
def max_minimum_allocation_for_families(instance, families) -> AllocationToFamilies:
    """
    Find the max-minimum (aka Egalitarian) allocation.
    :param instance: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
    :param families: a list of lists. Each list represents a family and contains the indices of the agents in the family.

    :return allocation_matrix:  a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the leximin vector of utilities.
    >>> families = [ [0], [1] ]  # two singleton families
    >>> max_minimum_allocation_for_families([ [3] , [5] ],families).round(3).matrix
    [[0.625]
     [0.375]]
    >>> max_minimum_allocation_for_families([ [4,2] , [1,4] ], families).round(3).matrix   # two different items
    [[1. 0.]
     [0. 1.]]
    >>> alloc = max_minimum_allocation_for_families([ [3,3] , [1,1] ], families).round(3).matrix   # two identical items
    >>> [sum(alloc[i]) for i in alloc.agents()]
    [0.5, 1.5]
    >>> v = [ [4,2] , [1,3] ]   # two different items
    >>> a = max_minimum_allocation_for_families(v, families).round(3)
    >>> a
    Family #0 with members [0] gets { 80.0% of 0} with values [3.2].
    Family #1 with members [1] gets { 20.0% of 0, 100.0% of 1} with values [3.2].
    <BLANKLINE>
    >>> a.matrix
    [[0.8 0. ]
     [0.2 1. ]]
    >>> print(a.utility_profile())
    [3.2 3.2]
    >>> families = [ [0, 1] ]  # One couple
    >>> max_minimum_allocation_for_families([ [4,2] , [1,4] ], families).round(3).matrix
    [[1. 1.]]
    >>> families = [ [0, 1], [2, 3] ]  # Two couples
    >>> a = max_minimum_allocation_for_families([ [4,2] , [1,4], [3,3], [5,5] ], families).round(3).matrix
    >>> a
    [[0.414 0.621]
     [0.586 0.379]]
    """
    return max_welfare_allocation_for_families(instance, families,
        welfare_function=lambda utilities: cvxpy.min(cvxpy.hstack(utilities)),
        welfare_constraint_function=lambda utility: utility >= 0)
Example no. 15
    def __init__(self, K, J, **kwargs):
        super().__init__(K, J, **kwargs)
        self.P = cvx.Variable((self.K, self.J), nonneg=True)
        self.Fk = cvx.Parameter((self.K, self.J), nonneg=True)
        # self.Pk = cvx.Parameter((self.K, self.J), nonneg=True)
        self.Pmax = cvx.Parameter(nonneg=True)
        self.rho = cvx.Parameter((self.K, self.J), nonneg=True)

        self.obj = 0
        constraints = []  #[cvx.norm(self.P - self.Pk, 1) <= self.delta]

        for k in range(self.K):
            num = 0
            for j in range(self.J):
                num += self.rho[k, j] * self.P[k, j] * self.Fk[k, j]
            self.obj += cvx.log(1 + num) - (cvx.max(self.rho[k, :]) -
                                            cvx.min(self.rho[k, :])) / 2

        for j in range(self.J):
            constraints.append(self.P[:, j] * self.Fk[:, j] <= self.Pmax)
            constraints.append(self.P[:, j] * self.Fk[:, j] >= 0)

        self.prob = cvx.Problem(cvx.Maximize(self.obj), constraints)
Example no. 16
# solving problem of hw3_1 (extra question)
import numpy as np
import cvxpy as cp

A = np.array([1, 2, 0, 1, 0, 0, 3, 1, 0, 3, 1, 1, 2, 1, 2, 5, 1, 0, 3, 2]).reshape(5, 4)
c_max = np.array([100, 100, 100, 100, 100])
p = np.array([3, 2, 7, 6])
p_disc = np.array([2, 1, 4, 2])
q = np.array([4, 10, 5, 10])

# Epigraph formulation: introduce u_i <= min(p_i*x_i, p_i*q_i + p_disc_i*(x_i - q_i))
# and maximize sum(u); this is equivalent to maximizing the sum of the piecewise-linear
# concave revenue terms directly (see the second formulation below).
u = cp.Variable(4)
x = cp.Variable(4)
objective = cp.Maximize(cp.sum(u))
constraints1 = [cp.minimum(p[i] * x[i], p[i] * q[i] + p_disc[i] * (x[i] - q[i])) >= u[i]
                for i in range(4)]
constraints = [x >= 0, A @ x <= c_max]
constraints1.extend(constraints)
prob = cp.Problem(objective, constraints1)
prob.solve()
print(x.value)
lamb = constraints1[2].dual_value

#############################################################
# original formulation
#############################################################
print("equivalent form")
xx = cp.Variable(4)
objective1 = cp.Maximize(sum(cp.minimum(p[i] * xx[i], p[i] * q[i] + p_disc[i] * (xx[i] - q[i]))
                             for i in range(4)))
constraints2 = [xx >= 0, A @ xx <= c_max]
prob1 = cp.Problem(objective1, constraints2)
prob1.solve()
print(xx.value)
Example no. 17
def train_erm_min_welfare(X, L_mat, U_mat, groups, lamb=1000):
    L_X = np.matmul(X, L_mat.T)
    U_X = np.matmul(X, U_mat.T)
    n, d = L_X.shape
    n, m = X.shape

    # For each group, compute its U_X and L_X matrix
    num_groups = len(groups.keys())
    group_ids = groups.keys()
    group_sizes = [groups[i].shape[0] for i in range(num_groups)]
    group_LX, group_UX = {}, {}
    for i in range(num_groups):
        group_LX[i] = np.matmul(groups[i], L_mat.T)
        group_UX[i] = np.matmul(groups[i], U_mat.T)

    # Constructing argmax/min labels used repeatedly
    y = np.argmin(L_X, axis=1)
    s, b = [], []
    for i in range(num_groups):
        s.append(np.argmin(group_UX[i], axis=1))
        b.append(np.argmax(group_UX[i], axis=1))

    learned_betas = []
    learned_predictions = {h: [] for h in range(num_groups)}
    learned_predictions_all = []
    def_alphas = get_default_alpha_arr(K)

    def get_min_welf_estimate(given_Beta):
        # Track the minimum (over groups) of the estimated group welfare.
        min_welf = 100
        for i in range(num_groups):
            # First compute the utility group i has for itself
            USFii = 0
            concave_p = 0
            for t in range(k):
                for li in range(group_sizes[i]):
                    USFii += def_alphas[t] * group_UX[i][
                        li, learned_predictions[i][t][li]]

            for li in range(group_sizes[i]):
                concave_version_p = np.min(group_UX[i][li, :] - np.matmul(given_Beta,groups[i][li,:])) + \
                        np.matmul(given_Beta[b[i][li],:],groups[i][li,:])
                concave_p += def_alphas[k] * concave_version_p

            USFii = USFii * (1 / group_sizes[i])
            concave_p = concave_p * (1 / group_sizes[i])
            min_welf = min(min_welf, USFii + concave_p)
        return min_welf

    # Run code to compute the mixture iteratively
    for k in range(K):
        Beta = cp.Variable(
            (d, m))  # The parameters of the one-vs-all classifier

        # Solve relaxed convexified optimization problem
        loss_objective = 0
        for i in range(n):
            # construct list with entries L(x_i, y) + beta_y^T x_i - beta_{y_i}^T x_i; for each y and y_i defined appropriately
            loss_objective += get_convex_version(X, L_X, Beta, y, i)
        loss_objective = (1 / n) * loss_objective

        # Our Envy-Free Objective is over groups - so iterate over them,
        # tracking the minimum group welfare as a cvxpy expression.
        min_welf = 100
        for i in range(num_groups):
            # First compute the utility group i has for itself
            USFii = 0
            concave_p = 0
            for t in range(k):
                for li in range(group_sizes[i]):
                    USFii += def_alphas[t] * group_UX[i][
                        li, learned_predictions[i][t][li]]

            for li in range(group_sizes[i]):
                concave_version_p = cp.min(group_UX[i][li, :] - cp.matmul(Beta,groups[i][li,:])) + \
                        cp.matmul(Beta[b[i][li],:],groups[i][li,:])
                concave_p += def_alphas[k] * concave_version_p

            USFii = USFii * (1 / group_sizes[i])
            concave_p = concave_p * (1 / group_sizes[i])
            min_welf = cp.minimum(min_welf, USFii + concave_p)

        #objective = cp.Maximize((1/100)*((-1/10)*loss_objective + lamb*min_welf))
        objective = cp.Maximize(lamb * min_welf)
        prob = cp.Problem(objective)

        # Solving the problem
        try:
            #results = prob.solve(solver=cp.SCS, verbose=False)#, feastol=1e-5, abstol=1e-5)
            results = prob.solve(verbose=False)
        except:
            return 0, 0, 0, 0
        Beta_value = np.array(Beta.value)
        #print("beta: ", Beta_value)
        learned_betas.append(Beta_value)

        min_welfare_estimate = get_min_welf_estimate(Beta_value)
        #print("Min welfare estimate: " , min_welfare_estimate)

        all_predictions = predictions(Beta_value, X)
        learned_predictions_all.append(all_predictions)

        for h in range(num_groups):
            learned_predictions[h].append(predictions(Beta_value, groups[h]))

    # We now solve for the optimal alpha values
    alphas = cp.Variable(K)
    alpha_loss = 0
    alpha_losses = []
    min_welfares = []
    for k in range(K):
        alpha_loss = 0
        for i in range(n):
            alpha_loss += L_X[i, learned_predictions_all[k][i]]
        alpha_losses.append(alpha_loss)
        #print("Hello: ", alpha_losses)
        # Collect the average utility of every group, then keep the minimum.
        utls = []
        for i in range(num_groups):
            total_ii_utl = 0
            for li in range(group_sizes[i]):
                total_ii_utl += group_UX[i][li, learned_predictions[i][k][li]]
            #print("k: ", k, "i: ", i, "utl: ", total_ii_utl)
            utls.append(total_ii_utl / group_sizes[i])
        min_welfares.append(min(utls))

    objective = cp.Maximize(-cp.sum(cp.multiply(alpha_losses, alphas))\
            + lamb*cp.sum(cp.multiply(min_welfares, alphas)))

    constraints = []
    for k in range(K):
        constraints.append(alphas[k] >= 0)
    constraints.append(cp.sum(alphas) == 1)

    prob = cp.Problem(objective, constraints)
    #try:
    results = prob.solve(cp.SCS, verbose=False)  #, feastol=1e-5, abstol=1e-5)
    #except:
    #return 0,0,0,0
    opt_alphas = np.array(alphas.value).flatten()
    #print("XIS")
    #print(np.array(xis.value))
    return learned_betas, learned_predictions_all, learned_predictions, opt_alphas
Example no. 18
X_opt = np.zeros((n, T_tot + 1))

x0 = np.array([[-4.5], [2]])
x = x0
X_opt[:, [0]] = x0
J_opt = 0
xf = np.array([[0], [0]])

obj = 0.0
for i in range(0, T_tot):
    X = cp.Variable((n, T + 1))
    U = cp.Variable((m, T))
    # pdb.set_trace()
    constraints = [
        cp.max(X) <= x_max,
        cp.min(X) >= x_min,
        cp.max(U) <= u_max,
        cp.min(U) >= u_min,
        X[:, [0]] == x0,
        #                      X[:,[-1]] == xf
        X[:, 1:] == cp.matmul(A, X[:, :-1]) + cp.matmul(B, U)
    ]

    for j in range(T):
        obj += cp.atoms.quad_form(X[:, j], Q)
        obj += cp.atoms.quad_form(U[:, j], R)

    obj += cp.atoms.quad_form(X[:, T], Q)

    # pdb.set_trace()
    prob = cp.Problem(cp.Minimize(obj), constraints)
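
The snippet above references A, B, Q, R, the horizon lengths, and the box limits from earlier code that is not shown, and stops before the solve/update step. For reference, here is a self-contained single-horizon sketch of the same box-constrained LQR problem; every number below is illustrative, not the original's data.

import numpy as np
import cvxpy as cp

n, m, T = 2, 1, 10
A = np.array([[1.0, 0.1], [0.0, 1.0]])
B = np.array([[0.0], [0.1]])
Q = np.eye(n)
R = 0.1 * np.eye(m)
x_min, x_max, u_min, u_max = -5.0, 5.0, -1.0, 1.0
x0 = np.array([[-4.5], [2.0]])

X = cp.Variable((n, T + 1))
U = cp.Variable((m, T))
obj = sum(cp.quad_form(X[:, j], Q) + cp.quad_form(U[:, j], R) for j in range(T))
obj += cp.quad_form(X[:, T], Q)              # terminal cost
constraints = [X >= x_min, X <= x_max,
               U >= u_min, U <= u_max,
               X[:, [0]] == x0,               # initial condition
               X[:, 1:] == A @ X[:, :-1] + B @ U]  # dynamics over the horizon
cp.Problem(cp.Minimize(obj), constraints).solve()
print("first control move:", U[:, 0].value)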
Example no. 19
    def make_box_constraint(self, X, x_max, x_min):
        constraint_upr = cp.max(X, axis=0) <= x_max
        constraint_lwr = cp.min(X, axis=0) >= x_min
        return [constraint_upr, constraint_lwr]
Example no. 20
def penalty(t_var):
	time_p = 0.0; overlap_p = 0.0; lunchtime_p = 0.0
	fri_class_p = 0.0; mon_class_p = 0.0
	
	# First: Penalize classes at bad times
	for j in range(J):
		if 1.5 == class_block_types[j]:
			# 0 if the class time is before the end of business hours;
			# e.g. if time is 7 (6pm - 7:30pm), then (7 - c)_+ = 1
			time_p += cvx.pos(t_var[j] - BUSINESS_HOURS_END_1_5)
			# 0 if the class time is after the start of business hours;
			# e.g. if time is 1 (8am), then (2 - 1)_+ = 1
			time_p += cvx.pos(BUSINESS_HOURS_START_1_5 - t_var[j])
		else: 
			time_p += cvx.pos(t_var[j] - BUSINESS_HOURS_END_1_5)
			time_p += cvx.pos(BUSINESS_HOURS_START_1_0 - t_var[j])
		
		# penalty for Friday and Monday classes
		# promotes MW and TuTh over WF for 2-days-per-week classes
#		fri_class_p += d_var[j,-1]
#		mon_class_p += d_var[j,:] - d_var[j,:]

	# Second, Penalize classes overlapping in soft groups
	for group in soft_overlap_groups:
		for i in group:
			for j in group:
				if j > i: # avoid double penalty, e.g. 1<->2, 2<->1
					print("Penalizing {}, {}".format(course_indices_to_names[i], course_indices_to_names[j]))
					idx_1 = i; idx_2 = j;
					# code reuse, beware
					c1_start = t_var[idx_1]
					c2_start = t_var[idx_2]
					d1 = d_var[idx_1,:]
					d2 = d_var[idx_2,:]

					# handle the constraint between a 1.5 hour and 1 hour class
					if 1.5 == class_block_types[idx_1] and 1.0 == class_block_types[idx_2]:
						# Convert to 1 hour overlap
						c1_start = c1_start + 1 + (c1_start-1)/2

					elif 1.5 == class_block_types[idx_2] and 1.0 == class_block_types[idx_1]:
						# Convert to 1 hour overlap
						c2_start = c2_start + 1 + (c2_start-1)/2

					# Add the penalty
					# Want 0 >= cvx.max(d1 + d2) - 1
					# if all are violated, e.g. positive
					# we get a penalty
					# Classes DO overlap when:
					#   t1[ ]t2
					# 1[      ]c2
					# and max(d1 + d2) - 2 == 0 or -1
					# min(-d1 + -d2) + 1 = 0 (no violation) or -1 (violation)
					# so c1 - t1 + -10*(max(d1 + d2) - 2) >= 0 for overlap
					# -c1 + t1 + 10*(max(d1 + d2) - 2) <= 0 for overlap
					# Don't question it!
					# It isn't a DCP error!
					overlap_p += \
					cvx.pos(c1_start - c2_start - (cvx.min(-d1 + -d2) + 2)) + \
					cvx.pos(c1_start + class_lengths[j] - c2_start - (cvx.min(-d1 + -d2) + 2))
					
	# Third. Penalize classes occurring at lunchtime.
	# Implemented with:
	# Fourth. Penalize classes "bunched together" in time. e.g. all in the morning.
	# This is necessary to spread the classes throughout the day.
	# Use the idea that each class is slightly "attracted to" a random "good" time.
	# That should spread them out.
	# While we're at it, don't "attract" courses to lunchtime spots.
	BEST_1_5_SPOTS = [1,2,4,5,6]*((J+4)//5)
	BEST_1_0_SPOTS = [2,3,4,6,7,8]*((J+5)//6)
	
	cluster_penalty = 0.0
	for j in range(J):
		
		#fri_class_p += d_var[j,-1]
		#mon_class_p +=cvx.sum( d_var[j,0:-1])

		if 1.5 == class_block_types[j]:
			cluster_penalty += cvx.abs(t_var[j] - np.random.choice(BEST_1_5_SPOTS, replace=False))
		else:
			cluster_penalty += cvx.abs(t_var[j] - np.random.choice(BEST_1_0_SPOTS, replace=False))

	# Fifth. Promote spreading courses out over the week, e.g. not clustered on a single day.
	
	# Averaging term - total number of course meetings / total days
	AVG_MEETINGS_PER_DAY = np.sum(class_day_types)/D
	# spread in days
	day_spread = 0
	for d in range(0,D):
		day_spread += cvx.abs(cvx.sum(d_var[:,d]) - AVG_MEETINGS_PER_DAY)

	# ADJUST RELATIVE WEIGHTS HERE
	return 2*time_p + 10 * overlap_p + day_spread*1 + cluster_penalty*1
Example no. 21
def remove_dc_from_spad_test(noisy_spad,
                             bin_edges,
                             bin_weight,
                             use_anscombe,
                             use_quad_over_lin,
                             use_poisson,
                             use_squared_falloff,
                             lam1=1e-2,
                             lam2=1e-1,
                             eps_rel=1e-5):
    def anscombe(x):
        return 2 * np.sqrt(x + 3. / 8)

    def inv_anscombe(x):
        return (x / 2)**2 - 3. / 8

    assert len(noisy_spad.shape) == 1
    C = noisy_spad.shape[0]

    assert bin_edges.shape == (C + 1, )
    bin_widths = bin_edges[1:] - bin_edges[:-1]
    spad_equalized = noisy_spad / bin_widths
    x = cp.Variable((C, ), "signal")
    z = cp.Variable((1, ), "dc")
    nx = cp.Variable((C, ), "signal noise")
    nz = cp.Variable((C, ), "dc noise")
    if use_poisson:
        # Need tricky stuff
        if use_anscombe:
            #             plt.figure()
            #             plt.bar(range(len(spad_equalized)), spad_equalized, log=True)
            #             plt.title("Before")
            #         d_ans = cp.Variable((C,), "denoised anscombe")
            # Apply Anscombe Transform to data:
            spad_ans = anscombe(spad_equalized)
            # Apply median filter to remove Gaussian Noise
            spad_ans_filt = scipy.signal.medfilt(spad_ans, kernel_size=15)
            # Apply Inverse Anscombe Transform
            spad_equalized = inv_anscombe(spad_ans_filt)


#             plt.figure()
#             plt.bar(range(len(spad_equalized)), spad_equalized, log=True)
#             plt.title("After")

        if use_quad_over_lin:
            obj = \
                    cp.sum([cp.quad_over_lin(nx[i], x[i]) for i in range(C)]) + \
                    cp.sum([cp.quad_over_lin(nz[i], z) for i in range(C)]) + \
                    lam2 * cp.sum(bin_weight*cp.abs(x))
            constr = [
                x >= 0, x + nx >= 0, z >= cp.min(spad_equalized),
                z + nz >= cp.min(spad_equalized),
                x + nx + z + nz == spad_equalized
            ]
            prob = cp.Problem(cp.Minimize(obj), constr)
            prob.solve(solver=cp.ECOS, verbose=True, reltol=eps_rel)
        else:
            obj = cp.sum_squares(spad_equalized - (x + z)) + lam2 * cp.sum(
                bin_weight * cp.abs(x))
            constr = [x >= 0, z >= 0]
            prob = cp.Problem(cp.Minimize(obj), constr)
            prob.solve(solver=cp.OSQP, verbose=True, eps_rel=eps_rel)
    else:
        # No need for tricky stuff
        obj = cp.sum_squares(spad_equalized -
                             (x + z)) + 1e0 * cp.sum(bin_weight * cp.abs(x))
        constr = [x >= 0, z >= 0]
        prob = cp.Problem(cp.Minimize(obj), constr)
        prob.solve(solver=cp.OSQP, eps_rel=eps_rel)
    denoised_spad = np.clip(x.value * bin_widths, a_min=0., a_max=None)
    print("z.value", z.value)
    return denoised_spad
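
A hypothetical smoke test for the simplest path through the function (`use_poisson=False`), assuming `numpy as np`, `scipy`, and `cvxpy as cp` are imported as in the snippet; the SPAD histogram below is synthetic.

import numpy as np

rng = np.random.default_rng(0)
C = 64
bin_edges = np.linspace(0.0, 8.0, C + 1)
true_signal = np.zeros(C)
true_signal[20] = 200.0                               # single peak at bin 20
noisy_spad = rng.poisson(true_signal + 5.0).astype(float)  # 5.0 = constant DC floor
denoised = remove_dc_from_spad_test(noisy_spad, bin_edges, bin_weight=1.0,
                                    use_anscombe=False, use_quad_over_lin=False,
                                    use_poisson=False, use_squared_falloff=False)
print(denoised.argmax())   # expect the recovered peak near bin 20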
Example no. 22
    def fit(self, X, y):
        start_time = time.time()

        # Check that X and y have correct shape
        X, y = check_X_y(X, y)

        # Store the classes seen during fit
        self.classes_ = unique_labels(y)

        if len(self.classes_) > 2:
            print(
                "Dilation-Erosion Morphological Perceptron can be used for binary classification!"
            )
            return

        if self.Split2Beta == True:
            skf = StratifiedKFold(n_splits=3, shuffle=True)
            WM_index, beta_index = next(iter(skf.split(X, y)))
            X_WM, X_beta = X[WM_index], X[beta_index]
            y_WM, y_beta = y[WM_index], y[beta_index]
        else:
            X_WM, X_beta = X, X
            y_WM, y_beta = y, y

        M, N = X_beta.shape

        indPos = (y_WM == self.classes_[1])
        Xpos = X_WM[indPos, :]
        Xneg = X_WM[~indPos, :]
        Mpos = Xpos.shape[0]
        Mneg = Xneg.shape[0]

        if self.weighted == True:
            Lpos = 1 / pairwise_distances(Xpos, [np.mean(Xpos, axis=0)],
                                          metric="euclidean").flatten()
            Lneg = 1 / pairwise_distances(Xneg, [np.mean(Xneg, axis=0)],
                                          metric="euclidean").flatten()
            nuPos = Lpos / Lpos.max()
            nuNeg = Lneg / Lneg.max()
        else:
            nuPos = np.ones((Mpos))
            nuNeg = np.ones((Mneg))

        # Solve DCCP problem for dilation
        if self.ref == "mean":
            ref = -np.mean(Xneg, axis=0).reshape((1, N))
        elif self.ref == "maximum":
            ref = -np.max(Xneg, axis=0).reshape((1, N))
        elif self.ref == "minimum":
            ref = -np.min(Xneg, axis=0).reshape((1, N))
        else:
            ref = np.zeros((1, N))

        w = cp.Variable((1, N))
        xiPos = cp.Variable((Mpos))
        xiNeg = cp.Variable((Mneg))

        lossDil = cp.sum(nuPos * cp.pos(xiPos)) / Mpos + cp.sum(
            nuNeg * cp.pos(xiNeg)) / Mneg + self.C * cp.norm(w - ref, 1)
        objectiveDil = cp.Minimize(lossDil)

        ZposDil = cp.max(np.ones((Mpos, 1)) @ w + Xpos, axis=1)
        ZnegDil = cp.max(np.ones((Mneg, 1)) @ w + Xneg, axis=1)
        constraintsDil = [ZposDil >= -xiPos, ZnegDil <= xiNeg]

        probDil = cp.Problem(objectiveDil, constraintsDil)
        probDil.solve(solver=self.solver, method='dccp', verbose=self.verbose)
        self.dil_ = (w.value).flatten()

        # Solve DCCP problem for erosion
        if self.ref == "mean":
            ref = -np.mean(Xpos, axis=0).reshape((1, N))
        elif self.ref == "maximum":
            ref = -np.min(Xpos, axis=0).reshape((1, N))
        elif self.ref == "minimum":
            ref = -np.max(Xpos, axis=0).reshape((1, N))
        else:
            ref = np.zeros((1, N))

        m = cp.Variable((1, N))
        etaPos = cp.Variable((Mpos))
        etaNeg = cp.Variable((Mneg))

        lossEro = cp.sum(nuPos * cp.pos(etaPos)) / Mpos + cp.sum(
            nuNeg * cp.pos(etaNeg)) / Mneg + self.C * cp.norm(m - ref, 1)
        objectiveEro = cp.Minimize(lossEro)

        ZposEro = cp.min(np.ones((Mpos, 1)) @ m + Xpos, axis=1)
        ZnegEro = cp.min(np.ones((Mneg, 1)) @ m + Xneg, axis=1)
        constraintsEro = [ZposEro >= -etaPos, ZnegEro <= etaNeg]

        probEro = cp.Problem(objectiveEro, constraintsEro)
        probEro.solve(solver=self.solver, method='dccp', verbose=self.verbose)
        self.ero_ = (m.value).flatten()

        # Fine tune beta
        if self.beta is None:
            beta = cp.Variable(nonneg=True)
            beta.value = 0.5

            if self.beta_loss == "squared_hinge":
                # Squared Hinge Loss
                lossBeta = cp.sum_squares(
                    cp.pos(-cp.multiply(
                        2 * ((y_beta == self.classes_[1]).astype(int)) - 1,
                        beta *
                        cp.max(np.ones((M, 1)) @ w.value + X_beta, axis=1) +
                        (1 - beta) *
                        cp.min(np.ones((M, 1)) @ m.value + X_beta, axis=1))))
            else:
                # Hinge Loss
                lossBeta = cp.sum(
                    cp.pos(-cp.multiply(
                        2 * ((y_beta == self.classes_[1]).astype(int)) - 1,
                        beta *
                        cp.max(np.ones((M, 1)) @ w.value + X_beta, axis=1) +
                        (1 - beta) *
                        cp.min(np.ones((M, 1)) @ m.value + X_beta, axis=1))))

            constraintsBeta = [beta <= 1]
            probBeta = cp.Problem(cp.Minimize(lossBeta), constraintsBeta)
            probBeta.solve(solver=cp.SCS,
                           verbose=self.verbose,
                           warm_start=True)
            self.beta = beta.value

        if self.verbose == True:
            print("\nTime to train: %2.2f seconds." %
                  (time.time() - start_time))
        return self
Example no. 23
    def get_allocation(self, unflattened_throughputs, scale_factors,
                       unflattened_priority_weights, cluster_spec):
        all_throughputs, index = \
            self.flatten(d=unflattened_throughputs,
                         cluster_spec=cluster_spec,
                         priority_weights=unflattened_priority_weights)
        if all_throughputs is None or len(all_throughputs) == 0: return None
        (m, n) = all_throughputs[0].shape
        (job_ids, single_job_ids, worker_types, relevant_combinations) = index
        x = cp.Variable((m, n))

        # Row i of scale_factors_array is the scale_factor of job
        # combination i repeated len(worker_types) times.
        scale_factors_array = self.scale_factors_array(scale_factors, job_ids,
                                                       m, n)

        throughputs_no_packed_jobs = np.zeros((len(single_job_ids), n))
        for i, single_job_id in enumerate(single_job_ids):
            for j, worker_type in enumerate(worker_types):
                throughputs_no_packed_jobs[i, j] = \
                    unflattened_throughputs[single_job_id][worker_type]
        proportional_throughputs = self._proportional_policy.get_throughputs(
            throughputs_no_packed_jobs, (single_job_ids, worker_types),
            cluster_spec)

        objective_terms = []
        # Multiply throughputs by scale_factors to ensure that scale_factor
        # is taken into account while allocating times to different jobs.
        # A job run on 1 GPU should receive `scale_factor` more time than
        # a job run on `scale_factor` GPUs.
        import scipy.sparse as sp
        idx = []
        tputs = []
        # compute the objective in a vectorized fashion
        for i in range(len(all_throughputs)):
            indexes = relevant_combinations[single_job_ids[i]]
            idx += indexes
            proportional_throughput = float(proportional_throughputs[i])
            curr_throughputs = np.multiply(
                all_throughputs[i][indexes],
                scale_factors_array[indexes]) / proportional_throughput
            tputs.append(curr_throughputs)

        tputs = sp.csc_matrix(np.vstack(tputs))
        indexed_vars = x[idx]
        realized_tputs = cp.multiply(tputs, indexed_vars)
        # reshape so that the sum of each row gives the throughput
        realized_tputs_mat = cp.reshape(
            realized_tputs,
            (len(all_throughputs),
             int(np.prod(realized_tputs.shape) / len(all_throughputs))),
            order='C')

        objective_fn = cp.min(cp.sum(realized_tputs_mat, axis=1))

        objective = cp.Maximize(objective_fn)

        # Make sure the allocation can fit in the cluster.
        constraints = self.get_base_constraints(x, single_job_ids,
                                                scale_factors_array,
                                                relevant_combinations)

        # Explicitly constrain all allocation values with an effective scale
        # factor of 0 to be 0.
        # NOTE: This is not strictly necessary because these allocation values
        # do not affect the optimal allocation for nonzero scale factor
        # combinations.
        for i in range(m):
            for j in range(n):
                if scale_factors_array[i, j] == 0:
                    constraints.append(x[i, j] == 0)
        cvxprob = cp.Problem(objective, constraints)
        if self._solver == 'SCS':
            # anderson acceleration is sometimes unstable, and adds
            # significant overhead
            kwargs = {'acceleration_lookback': 0}
        else:
            kwargs = {}

        result = cvxprob.solve(solver=self._solver, **kwargs)

        if cvxprob.status != "optimal":
            print('WARNING: Allocation returned by policy not optimal!')

        return self.unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
Example no. 24
    def _init_edge_deps(self):
        self.delays = defaultdict(lambda: cvxpy.Variable(integer=True))
        self.delay_violations = []
        for edge in self.edges:
            src_node = self.id_to_node_lookup[edge.src]
            dst_node = self.id_to_node_lookup[edge.dst]
            # if they're not in the same partition, then guarantee inequality.
            enforce_inequality: int
            if not self._possible_in_same_partition(src_node, dst_node):
                enforce_inequality = 1
            else:
                dst_candidates = self._candidate_partitions(dst_node)
                inequalities = []
                for partition_type in self._candidate_partitions(src_node):
                    src_row = self._get_row(partition_type, src_node)
                    if partition_type not in dst_candidates:
                        inequalities.append(cvxpy.sum(src_row))
                        continue
                    dst_row = self._get_row(partition_type, dst_node)
                    inequalities.append(
                        cvxpy.sum(cvxpy.maximum(src_row - dst_row, 0)))
                enforce_inequality = cvxpy.maximum(*inequalities,
                                                   0) if inequalities else 0
            self.delay_violations.append(
                self._project_to_bool(
                    cvxpy.maximum(
                        self.delays[src_node] + enforce_inequality -
                        self.delays[dst_node], 0), self.num_nodes * 2))

        # for each node, compute its partition delay. Then constrain that the partition delay and actual delay are
        # close.
        partition_delays = {
            partition_type: cvxpy.Variable(shape=count, nonneg=True)
            for partition_type, count in self.partition_counts.items()
        }

        max_delay = 4 * sum(self.partition_counts.values())

        for partition_delay in partition_delays.values():
            self._add_constraint(partition_delay <= max_delay)

        for node in self.nodes:
            target_delay_min = [max_delay]
            target_delay_max = [0]
            for partition_type, node_to_loc in self.node_to_loc_map.items():
                if node not in node_to_loc:
                    continue

                loc = node_to_loc[node]
                row = self.partition_matrices[partition_type][loc, :]
                activation = row * -max_delay + max_delay
                target_delay_min.append(
                    cvxpy.min(partition_delays[partition_type] + activation))

                activation = row * max_delay - max_delay
                target_delay_max.append(
                    cvxpy.max(partition_delays[partition_type] + activation))
            target_min = cvxpy.minimum(*target_delay_min)
            self._add_pseudo_constraint(
                cvxpy.maximum(self.delays[node] - target_min, 0))

            target_max = cvxpy.maximum(*target_delay_max)
            self._add_pseudo_constraint(
                cvxpy.maximum(target_max - self.delays[node], 0))
Example no. 25
    def _get_allocation(self, job_ids, priority_weights,
                        proportional_throughputs, scale_factors_array, m, n,
                        final_normalized_effective_throughputs,
                        normalized_effective_throughputs_so_far):
        M = self._M

        if self._lp is None:
            self._lp_variables_and_parameters = {}
            self._lp_variables_and_parameters['x'] = cp.Variable((m, n))
            self._lp_variables_and_parameters[
                'normalized_effective_throughputs_so_far_parameter'] = cp.Parameter(
                    len(job_ids))
            self._lp_variables_and_parameters[
                'normalized_effective_throughputs_lower_bounds_parameter'] = cp.Parameter(
                    len(job_ids))
            self._lp_variables_and_parameters[
                'multiplicative_terms_parameter'] = cp.Parameter(len(job_ids))
            self._lp_variables_and_parameters[
                'additive_terms_parameter'] = cp.Parameter(len(job_ids))

        x = self._lp_variables_and_parameters['x']
        normalized_effective_throughputs_so_far_parameter = \
            self._lp_variables_and_parameters[
                'normalized_effective_throughputs_so_far_parameter']
        normalized_effective_throughputs_lower_bounds_parameter = \
            self._lp_variables_and_parameters[
                'normalized_effective_throughputs_lower_bounds_parameter']
        multiplicative_terms_parameter = \
            self._lp_variables_and_parameters['multiplicative_terms_parameter']
        additive_terms_parameter = \
            self._lp_variables_and_parameters['additive_terms_parameter']

        effective_throughputs = self._get_effective_throughputs(x)
        normalized_effective_throughputs = cp.multiply(
            effective_throughputs,
            1.0 / proportional_throughputs.reshape(len(job_ids)))

        # Solve max-min optimization problem over all jobs with priority
        # weight > 0.
        normalized_effective_throughputs_so_far_parameter.value = \
            normalized_effective_throughputs_so_far
        objective_terms = normalized_effective_throughputs - \
            normalized_effective_throughputs_so_far_parameter
        multiplicative_terms = np.zeros(len(job_ids))
        mask = np.zeros(len(job_ids))
        additive_terms = np.zeros(len(job_ids))
        for i, job_id in enumerate(job_ids):
            if job_id not in final_normalized_effective_throughputs:
                if priority_weights[i] > 0.0:
                    multiplicative_terms[
                        i] = priority_weights[i] * scale_factors_array[i, 0]
                    mask[i] = 1.0 / multiplicative_terms[i]
                else:
                    additive_terms[i] = M
            else:
                additive_terms[i] = M
        multiplicative_terms_parameter.value = multiplicative_terms
        additive_terms_parameter.value = additive_terms

        normalized_effective_throughputs_lower_bounds = np.zeros(len(job_ids))
        for i, job_id in enumerate(job_ids):
            if job_id in final_normalized_effective_throughputs:
                normalized_effective_throughputs_lower_bounds[i] = \
                    final_normalized_effective_throughputs[job_id]
            else:
                normalized_effective_throughputs_lower_bounds[i] = \
                    normalized_effective_throughputs_so_far[i]
        normalized_effective_throughputs_lower_bounds_parameter.value = \
            normalized_effective_throughputs_lower_bounds

        if self._lp is None:
            self._lp_objective = cp.Maximize(
                cp.min(
                    cp.multiply(objective_terms,
                                multiplicative_terms_parameter) +
                    additive_terms_parameter))
            # Specify constraints.
            constraints = self._get_constraints(x, scale_factors_array)
            constraints.append(
                normalized_effective_throughputs >=
                normalized_effective_throughputs_lower_bounds_parameter)

            self._lp = cp.Problem(self._lp_objective, constraints)

        result = self._lp.solve(solver='ECOS', warm_start=True)

        return x.value, self._lp_objective.value, mask
Example no. 26
                        break
                N = len(data)

                #optimization problem
                tau = [0.1, 0.3, 0.5, 0.7, 0.9]
                l = 0
                q = 0
                M = len(points)
                A = cp.Variable((M + N, Q))
                b = cp.Variable(Q)
                Gsqrt = sp.linalg.sqrtm(Geps)

                hi = ((Geps @ (A @ W.T))[N:N + M])
                hj = (Geps @ (A @ W.T))
                soc_constraint = [
                    (1 / eta) * (U @ b)[l] + (1 / (eta)) * cp.min(
                        (hi @ e[l])) >= cp.norm((Gsqrt @ hj) @ e[l], 2)
                    for l in range(Q - 1)
                ]
                obj = 0
                Gn = np.array(extractSubMatrix(G, 0, N, 0, N + M))
                y = np.array(y)
                for q in range(Q):
                    for n in range(N):
                        obj += pinball(y[n] - ((Gn @ A)[n, q] + b[q]), tau[q])

                hl = (Gsqrt @ A)
                f1 = 0
                for q in range(Q):
                    f1 = f1 + cp.norm(hl @ eq[q], 2)**2
                bn = cp.norm(b, 2)
                prob = cp.Problem(cp.Minimize((1 / N) * obj),
Example no. 27
    def get_allocation_using_job_type_throughputs(
            self, unflattened_throughputs, job_id_to_job_type_key,
            scale_factors, unflattened_priority_weights, cluster_spec):
        job_ids = sorted(job_id_to_job_type_key.keys())
        if len(job_ids) == 0:
            return None
        job_type_keys = sorted(unflattened_throughputs.keys())
        worker_types = sorted(cluster_spec.keys())
        num_workers = \
            [cluster_spec[worker_type] for worker_type in worker_types]

        # Create a map from job type to list of job indexes.
        job_type_key_to_job_idx = {}
        for i, job_id in enumerate(job_ids):
            job_type_key = job_id_to_job_type_key[job_id]
            if job_type_key not in job_type_key_to_job_idx:
                job_type_key_to_job_idx[job_type_key] = []
            job_type_key_to_job_idx[job_type_key].append(i)

        # Num jobs.
        n = len(job_ids)
        # Num job_types.
        a = len(unflattened_throughputs.keys())
        # Num worker_types.
        m = len(worker_types)
        # Num variables per job.
        num_vars_per_job = 1 + a

        # Set up scale factors.
        flattened_scale_factors = \
            np.reshape([scale_factors[job_id] for job_id in job_ids], (n, 1))
        scale_factors_array = np.tile(flattened_scale_factors,
                                      (1, num_vars_per_job * m))

        # Set up flattened job type throughputs.
        flattened_throughputs = np.zeros(shape=(a, (1 + a) * m),
                                         dtype=np.float32)
        for i, job_type_key in enumerate(job_type_keys):
            for k, worker_type in enumerate(worker_types):
                for j, other_job_type_key in enumerate([None] + job_type_keys):
                    if j > 0 and other_job_type_key[1] != job_type_key[1]:
                        flattened_throughputs[i, k * (1 + a) + j] = 0.0
                    else:
                        flattened_throughputs[i,k*(1+a)+j] = \
                            unflattened_throughputs[job_type_key][worker_type][other_job_type_key]

        # Set up masks to avoid double-counting allocation values when
        # computing constraint that the sum of allocation values of each
        # worker type must be <= the number of workers of that worker type.
        # TODO: Change this if we ever consider combinations larger than pairs.
        masks = np.full(shape=(n, num_vars_per_job), fill_value=0.5)
        masks[:, 0] = 1.0

        # Allocation matrix.
        x = cp.Variable((n, num_vars_per_job * m))

        constraints = [
            # All allocation values must be >= 0.
            x >= 0,
            # The sum of allocation values for each job must be <= 1.
            cp.sum(x, axis=1) <= 1
        ]

        # The sum of allocation values for each worker type must be <=
        # the number of workers of that type.
        per_worker_type_allocations = []
        for i in range(m):
            relevant_vars = \
                x[:,i*num_vars_per_job:(i+1)*num_vars_per_job]
            relevant_scale_factors = \
                scale_factors_array[:,i*num_vars_per_job:(i+1)*num_vars_per_job]
            per_worker_type_allocations.append(
                cp.sum(
                    cp.multiply(relevant_vars,
                                cp.multiply(relevant_scale_factors, masks))))
        constraints.append(
            cp.hstack(per_worker_type_allocations) <= num_workers)

        # Set the following constraints:
        # for all job type pairs a, b:
        #   sum of allocation of all jobs of type a paired with type b ==
        #   sum of allocation of all jobs of type b paired with type a
        lhs = []
        rhs = []
        for i, job_type_key_0 in enumerate(job_type_keys):
            for j, job_type_key_1 in enumerate(job_type_keys):
                if j <= i:
                    continue
                elif job_type_key_0[1] != job_type_key_1[1]:
                    continue

                # Retrieve the list of jobs of each type.
                job_type_0_jobs = job_type_key_to_job_idx[job_type_key_0]
                job_type_1_jobs = job_type_key_to_job_idx[job_type_key_1]

                for k in range(m):
                    job_type_0_mask = np.zeros(x.shape)
                    job_type_1_mask = np.zeros(x.shape)

                    # Allocation of job_type_0 jobs when paired with job_type_1
                    for job_idx in job_type_0_jobs:
                        offset = k * num_vars_per_job + 1 + j
                        job_type_0_mask[job_idx, offset] = 1

                    # Allocation of job_type_1 jobs when paired with job_type_0
                    for job_idx in job_type_1_jobs:
                        offset = k * num_vars_per_job + 1 + i
                        job_type_1_mask[job_idx, offset] = 1

                    lhs.append(cp.sum(x[job_type_0_mask == 1]))
                    rhs.append(cp.sum(x[job_type_1_mask == 1]))

        assert (len(lhs) == len(rhs))
        if len(lhs) > 0:
            constraints.append(cp.hstack(lhs) == cp.hstack(rhs))

        # Add constraints to make all variables of the form i-A where job i
        # is of job type A equal.
        for i, job_type_key in enumerate(job_type_keys):
            for k in range(m):
                same_job_type_vars = []
                job_type_jobs = job_type_key_to_job_idx[job_type_key]

                # Find all variables for job-job_type pairs where the job
                # types match.
                offset = k * num_vars_per_job + 1 + i
                for job_idx in job_type_jobs:
                    same_job_type_vars.append(x[job_idx, offset])

                # Constrain the variables to all be equal.
                c = cp.Variable()
                constraints.append(cp.hstack(same_job_type_vars) == c)

        throughputs_no_packed_jobs = np.zeros(
            (len(job_ids), len(worker_types)))
        for i, job_id in enumerate(job_ids):
            job_type_key = job_id_to_job_type_key[job_id]
            for j, worker_type in enumerate(worker_types):
                throughputs_no_packed_jobs[i, j] = \
                    unflattened_throughputs[job_type_key][worker_type][None]
        proportional_throughputs = self._proportional_policy.get_throughputs(
            throughputs_no_packed_jobs, (job_ids, worker_types), cluster_spec)

        # Allocation coefficients.
        all_coefficients = np.zeros((n, num_vars_per_job * m))
        for i, job_id in enumerate(job_ids):
            job_type_key = job_id_to_job_type_key[job_id]
            job_type_idx = job_type_keys.index(job_type_key)
            if len(job_type_key_to_job_idx[job_type_key]) == 1:
                for k, worker_type in enumerate(worker_types):
                    offset = k * num_vars_per_job + 1 + job_type_idx
                    constraints.append(x[i, offset] == 0.0)
            proportional_throughput = proportional_throughputs[i]
            all_coefficients[i] = \
                np.multiply(flattened_throughputs[job_type_idx],
                            scale_factors_array[i]) /\
                    (unflattened_priority_weights[job_id] * proportional_throughput)
        objective = \
            cp.Maximize(cp.min(cp.sum(cp.multiply(all_coefficients, x),
                                      axis=1)))

        cvxprob = cp.Problem(objective, constraints)
        result = cvxprob.solve(solver=self._solver)

        if cvxprob.status != "optimal":
            print('WARNING: Allocation returned by policy not optimal!')

        allocation = x.value.clip(min=0.0).clip(max=1.0)

        # Unflatten allocation.
        unflattened_allocation = {}
        for i, job_id in enumerate(job_ids):
            unflattened_allocation[job_id] = {}
            for j, worker_type in enumerate(worker_types):
                unflattened_allocation[job_id][worker_type] = {}
                for k, job_type_key in enumerate([None] + job_type_keys):
                    unflattened_allocation[job_id][worker_type][job_type_key] = \
                        allocation[i, j * num_vars_per_job + k]

        return self.convert_job_type_allocation(unflattened_allocation,
                                                job_id_to_job_type_key)
Esempio n. 28
0
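# NOTE: best-guess imports for the snippet below; the helpers computeG, create_folds,
# expectile and getperformance, and the list `minperf`, are assumed to be defined
# elsewhere in the original source.
import time
from math import exp, log, sqrt

import numpy as np
import pandas as pd
import scipy as sp
import scipy.linalg  # makes sp.linalg.sqrtm available
import cvxpy as cp
from sklearn.model_selection import train_test_split
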
def extractSubMatrix(matrix, rowStartIdx, rowEndIdx, colStartIdx, colEndIdx):

    result = [x[colStartIdx:colEndIdx] for x in matrix[rowStartIdx:rowEndIdx]]

    return result
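    # Example (hypothetical values): extractSubMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 0, 2, 1, 3)
    # returns [[2, 3], [5, 6]].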

# We test the code on the Engel data set, scaled using R and saved in a CSV file.
if __name__ == "__main__":

    df = pd.read_csv("C:/Users/malex/Desktop/scrm/code/SyntheticData.csv")
    df.columns = ["stock0", "loss between time 1&2"]

    y = df["loss between time 1&2"]
    data = []
    for i in range(len(df["stock0"])):
        data.append(df["stock0"][i])

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=42)
    # Convert the training split to arrays; `data` and `y` are working copies.
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    data = X_train.copy()
    y = y_train.copy()
    foldX = []
    foldY = []
    for i, j in create_folds(X_train, 2):
        foldX.append(data[i].tolist())
        foldY.append(y[i].tolist())
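    # Pairwise absolute distances between training points; their median is used
    # below as the kernel bandwidth (median heuristic).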
    distance = []
    for i in range(len(data)):
        for j in range(len(data)):
            distance.append(abs(data[i] - data[j]))

    #go for 2 folds
    data = []
    y = []
    data1 = np.array(foldX[0])
    y1 = np.array(foldY[0])
    datatest1 = np.array(foldX[1])
    ytest1 = np.array(foldY[1])
    data2 = np.array(foldX[1])
    y2 = np.array(foldY[1])
    datatest2 = np.array(foldX[0])
    ytest2 = np.array(foldY[0])

    DataX = [data1, data2]
    DataY = [y1, y2]
    TestX = [datatest1, datatest2]
    TestY = [ytest1, ytest2]
    lmdg_v = [
        20.25, 91.125, 410.0625, 1845.28125, 5000, 8303.765625, 20000, 40000
    ]
    gamma_v = [np.median(distance)]
    b_v = [(10**log(i)) * max(np.abs(df['loss between time 1&2']))
           for i in [exp(1), 3, 6, exp(2), 10, 20]]
    perf = []
    performance = []
    lmdf = cp.Parameter()
    values = []
    perf2 = []
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    start_time = time.time()

    #Running CV
    for gamma in gamma_v:
        print("s=", gamma)
        for lmdg in lmdg_v:
            for lmdb in b_v:
                lmd = lmdg

                for i in range(2):
                    #print("i=",i)
                    data = DataX[i]
                    y = DataY[i]
                    start_time2 = time.time()
                    minX0 = min(df["stock0"])
                    maxX0 = max(df["stock0"])
                    # minX1=min(df["stock1"])
                    # maxX1=max(df["stock1"])
                    # Delta-net: a grid of points spaced `delta` apart covering the range of stock0.
                    delta = 6
                    points = np.arange(minX0, maxX0, delta).tolist()

                    # Kernel centers: the fold's training points plus the delta-net points.
                    data = list(data)

                    X = data + points

                    #computing the gram matrix
                    G = computeG(X, gamma)
                    Geps = G + (10**(-4) * np.ones((len(X), len(X))))
                    #computing the eta coefficient

                    eta = sqrt(2) * (1 - exp(-sqrt(2 * delta**2) /
                                             (gamma**2)))**(0.5)
                    #computing the W and U matrices
                    Q = 5
                    # W is the (Q-1) x Q first-difference matrix: row l has -1 in
                    # column l and +1 in column l+1.
                    W = np.zeros((Q - 1, Q))
                    for l in range(Q - 1):
                        W[l, l] = -1
                        W[l, l + 1] = 1
                    U = W
                    # e and eq are identity matrices used to select single columns below.
                    e = np.eye(Q - 1)
                    eq = np.eye(Q)
                    N = len(data)

                    #optimization problem
                    tau = [0.1, 0.3, 0.5, 0.7, 0.95]
                    l = 0
                    q = 0
                    M = len(points)
                    A = cp.Variable((M + N, Q))
                    b = cp.Variable(Q)
                    Gsqrt = sp.linalg.sqrtm(Geps)
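                    # The SOC constraints below appear to enforce non-crossing of
                    # adjacent expectile curves: the smallest gap between consecutive
                    # levels on the delta-net (rows N..N+M of Geps @ A @ W.T), plus the
                    # intercept gap, scaled by 1/eta, must dominate an RKHS-norm term.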

                    hi = ((Geps @ (A @ W.T))[N:N + M])
                    hj = (Geps @ (A @ W.T))
                    soc_constraint = [
                        (1 / eta) * (U @ b)[l] + (1 / (eta)) * cp.min(
                            (hi @ e[l])) >= cp.norm((Gsqrt @ hj) @ e[l], 2)
                        for l in range(Q - 1)
                    ]
                    obj = 0
                    Gn = np.array(extractSubMatrix(G, 0, N, 0, N + M))
                    y = np.array(y)
                    for q in range(Q):
                        for n in range(N):
                            obj += expectile(y[n] - ((Gn @ A)[n, q] + b[q]),
                                             tau[q])

                    hl = (Gsqrt @ A)
                    f1 = 0
                    for q in range(Q):
                        f1 = f1 + cp.norm(hl @ eq[q], 2)**2
                    bn = cp.norm(b, 2)
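                    # Minimize the mean expectile loss over the Q levels, subject to
                    # the SOC (ordering) constraints, an intercept-norm budget (lmdb)
                    # and a squared-RKHS-norm budget (lmd).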
                    prob = cp.Problem(
                        cp.Minimize((1 / N) * obj),
                        soc_constraint + [bn <= lmdb] + [f1 <= lmd])
                    prob.solve(solver="MOSEK")
                    end_time2 = time.time()
                    #print("prob value =",obj.value)
                    perf.append(
                        getperformance(TestX[i].tolist(), points, TestY[i],
                                       A.value, Q, N, M, tau))
                values.append((lmd / 1000, lmdb))
                # print("prf value",np.mean(perf))
                performance.append(np.mean(perf))

                perf = []
    print(min(performance))
    minperf.append(min(performance))
Esempio n. 29
0
    def delay_gap(self):
        max_delay = self.partitions * self.delay_per_partition * 2
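        # max_delay acts as a big-M constant: adding or subtracting it below switches
        # individual delay constraints on or off depending on partition assignment.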

        # Time at which the external outputs start receiving data.
        self.external_destination_delay = external_destination_delay = cvxpy.Variable(name="External Delay")
        self.delays = delays = cvxpy.Variable(name="Delays", shape=self.num_nodes)
        partition_delays = cvxpy.Variable(name="Partition Delays", shape=self.partitions)
        for src, dst, _, _ in self.internal_edges:
            src_loc = self.node_to_loc_map[src]
            dst_loc = self.node_to_loc_map[dst]
            if not self._possible_in_same_partition(src_loc, dst_loc):
                # the constraint is always active if they can't be in the same partition
                activity_component = 0
            else:
                # strongly negative if in same partition, otherwise is 0
                not_same_partition = self._is_different_value(self.node_partitions[dst_loc],
                                                              self.node_partitions[src_loc])
                activity_component = not_same_partition * max_delay - max_delay
            min_start_delay = delays[src_loc] + activity_component + self.delay_per_partition + self.network_delay
            # self._add_constraint(
            #     delays[dst_loc] >= min_start_delay)
            self._add_pseudo_constraint(
                cvxpy.maximum(min_start_delay - delays[dst_loc], 0)
            )

            twiddle = src in self.retime_nodes or dst in self.retime_nodes
            self._add_constraint(delays[dst_loc] >= delays[src_loc] + twiddle)
            # self._add_constraint(delays[dst_loc] >= delays[src_loc])

        for i in range(self.num_nodes):
            # 0 if node is in partition otherwise strongly positive
            activation = self.node_to_partition_matrix[i, :] * -max_delay + max_delay
            target_delay = cvxpy.min(partition_delays + activation)
            # self._add_constraint(delays[i] <= target_delay)
            self._add_pseudo_constraint(cvxpy.maximum(delays[i] - target_delay, 0))

            # 0 if node is in partition otherwise strongly negative
            activation = self.node_to_partition_matrix[i, :] * max_delay - max_delay
            target_delay = cvxpy.max(partition_delays + activation)
            # self._add_constraint(delays[i] >= target_delay)
            self._add_pseudo_constraint(cvxpy.maximum(target_delay - delays[i], 0))
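            # Together, the two pseudo-constraints above pin delays[i] to the delay of
            # its own partition: the activation terms mask out every other partition.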

        # # constrain that the delay for nodes in the same partition are equal.
        # # We only care for nodes which might be in the same partition.
        # for loc1, loc2 in itertools.combinations(range(self.num_nodes), 2):
        #     if not self._possible_in_same_partition(loc1, loc2):
        #         continue
        #     # if the nodes are in the same partition, then impose an equality constraint on the delays
        #     peq = cvxpy.sum(
        #         cvxpy.maximum(self.node_to_partition_matrix[loc1, :] + self.node_to_partition_matrix[loc2, :] - 1, 0))
        #     dne = self._project_to_bool(cvxpy.abs(delays[loc1] - delays[loc2]))
        #     # dne = cvxpy.minimum(cvxpy.abs(delays[loc1] - delays[loc2]), 1)
        #     self._add_pseudo_constraint(cvxpy.maximum(peq + dne - 1.5, 0))
        #     # self._add_constraint(peq + dne <= 1.3)

        nodes_with_external_input = {edge.dst for edge in self.external_input_edges}
        for dst in nodes_with_external_input:
            # src is external, dst is internal. The destination must start at least
            # one network delay plus one partition delay after time zero.
            self._add_constraint(
                self.delays[self.node_to_loc_map[dst]] >= self.network_delay + self.delay_per_partition)

        for src, dst, _, _ in self.external_output_edges:
            # self._add_constraint(delays[self.node_to_loc_map[
            #     src]] + self.network_delay + self.delay_per_partition <= external_destination_delay)
            self._add_pseudo_constraint(
                cvxpy.maximum(delays[self.node_to_loc_map[
                    src]] + self.network_delay + self.delay_per_partition - external_destination_delay, 0)
            )

        gaps = {
            "s": [],
            "v": []
        }
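        # For every edge, record the delay gap (destination start time minus source
        # start time) that a retiming buffer along that edge would have to absorb,
        # grouped by edge type.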
        for src, dst, tp, _ in self.edges:
            if self._is_internal_node(src):
                src_loc = self.node_to_loc_map[src]
                src_delay = delays[src_loc]
            else:
                # external input
                src_delay = 0
            if self._is_internal_node(dst):
                dst_loc = self.node_to_loc_map[dst]
                dst_delay = delays[dst_loc]
            else:
                dst_delay = external_destination_delay
            gaps[tp].append(dst_delay - src_delay)

        gap_costs = {
            "v": 1.0 / min(self.constraints.vin, self.constraints.vout),
            "s": 1.0 / min(self.constraints.sin, self.constraints.sout)
        }
        total_retime_cost = 0
        for tp in "sv":
            num_large_gaps = sum(
                [self._project_to_bool(cvxpy.maximum(gap - self.buffer_capacity, 0)) for gap in gaps[tp]], 0)
            # if merge_probability is low, then there's a high chance of requiring separate compute anyway.
            # if merge_probability is 1, then it requires gap_costs[tp]
            # if merge_probability is 0, then it requires 1 full PCU.
            retime_multi = gap_costs[tp] * self.merge_probability + (1 - gap_costs[tp]) * (1 - self.merge_probability)
            total_retime_cost += gap_costs[tp] * num_large_gaps * retime_multi

        # want to minimize total_delay

        def verify():
            for loc1, loc2 in itertools.combinations(range(self.num_nodes), 2):
                if self.node_partitions.value[loc1] == self.node_partitions.value[loc2]:
                    if delays[loc1].value != delays[loc2].value:
                        return False
            return True

        self.verifiers.append(verify)

        return total_retime_cost, external_destination_delay