Example #1
    def test_signed_curvature(self):
        # Convex argument.
        expr = cvx.abs(1 + cvx.exp(cvx.Variable()))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(-cvx.entr(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        expr = cvx.abs(-cvx.log(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        # Concave argument.
        expr = cvx.abs(cvx.log(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        expr = cvx.abs(-cvx.square(cvx.Variable()))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(cvx.entr(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        # Affine argument.
        expr = cvx.abs(cvx.NonNegative())
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(-cvx.NonNegative())
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(cvx.Variable())
        self.assertEqual(expr.curvature, s.CONVEX)
Example #2
def exact_uot(C, a, b, tau, vbo=False):
    nr = len(a)
    nc = len(b)
    X = cp.Variable((nr, nc), nonneg=True)

    row_sums = cp.sum(X, axis=1)
    col_sums = cp.sum(X, axis=0)

    obj = cp.sum(cp.multiply(X, C))

    obj -= tau * cp.sum(cp.entr(row_sums))
    obj -= tau * cp.sum(cp.entr(col_sums))

    obj -= tau * cp.sum(cp.multiply(row_sums, cp.log(a)))
    obj -= tau * cp.sum(cp.multiply(col_sums, cp.log(b)))

    obj -= 2 * tau * cp.sum(X)
    obj += tau * cp.sum(a) + tau * cp.sum(b)

    prob = cp.Problem(cp.Minimize(obj))

    prob.solve(solver='SCS', verbose=vbo)

    # print('UOT optimal value:', prob.value)

    return prob.value
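
A minimal usage sketch for exact_uot; the marginals and cost matrix below are made-up toy data, and SCS must be installed for the solve to run.

import numpy as np

a = np.array([0.2, 0.3, 0.5])                                     # source marginal (3 bins)
b = np.array([0.4, 0.7])                                          # target marginal (2 bins, unbalanced)
C = np.abs(np.arange(3)[:, None] - np.arange(2)[None, :]) * 1.0   # 3x2 ground-cost matrix
print('UOT value:', exact_uot(C, a, b, tau=1.0))
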
Example #3
    def optimize(self, beta_c, beta_d, weight):
        '''
        Builds the objective and constraints of the optimization problem and solves it.
        '''
        n = self.window[1] - self.window[0] + 1
        p = cvx.Variable((n, 1))
        z = cvx.Variable(1)
        d = np.array([list(np.arange(1, n, 1))]).T
        expr = (1.0 / beta_c) * (
            cvx.sum(-1 * cvx.entr(p[1:, [0]]) -
                    beta_d * cvx.multiply(d, p[1:, [0]]))) - (1.0 / beta_c) * (
                        cvx.log(p[0, [0]]) + cvx.entr(p[0, [0]])) + weight * z
        obj = cvx.Minimize(expr)
        constraints = [cvx.sum(p) == 1, p >= 0, p <= 1, z >= 0]
        for key, timePt in enumerate(
                list(np.arange(self.window[0], self.window[1], 1))):
            deltaPost = self.nowSt * cvx.sum(
                p[:key + 1 + 1, [0]]) - self.nowE * cvx.sum(
                    cvx.multiply(self.Glists[timePt + 1], p[:key + 1 + 1,
                                                            [0]]))
            deltaPre = self.nowSt * cvx.sum(
                p[:key + 1, [0]]) - self.nowE * cvx.sum(
                    cvx.multiply(self.Glists[timePt], p[:key + 1, [0]]))
            cexp = (self.load[timePt + 1] + deltaPost) - (self.load[timePt] +
                                                          deltaPre)
            constraints = constraints + [cexp <= z]
        for key, timePt in enumerate(
                list(np.arange(self.window[0] + 1, self.window[1] + 1, 1))):
            cexp2 = p[key + 1, [0]] - np.exp(beta_d * (key + 1)) * p[0, [0]]
            constraints = constraints + [cexp2 >= 0]

        prob = cvx.Problem(obj, constraints)
        prob.solve()

        return p.value, z.value, prob.status, prob.value
Example #4
    def test_signed_curvature(self):
        # Convex argument.
        expr = cvx.abs(1 + cvx.exp(cvx.Variable()))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(-cvx.entr(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        expr = cvx.abs(-cvx.log(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        # Concave argument.
        expr = cvx.abs(cvx.log(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        expr = cvx.abs(-cvx.square(cvx.Variable()))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(cvx.entr(cvx.Variable()))
        self.assertEqual(expr.curvature, s.UNKNOWN)

        # Affine argument.
        expr = cvx.abs(cvx.Variable(nonneg=True))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(-cvx.Variable(nonneg=True))
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.abs(cvx.Variable())
        self.assertEqual(expr.curvature, s.CONVEX)
Example #5
    def test_entr(self) -> None:
        """Test domain for entr.
        """
        expr = cp.entr(self.a)
        self.a.value = 2
        self.assertAlmostEqual(expr.grad[self.a], -np.log(2) - 1)

        self.a.value = 3
        self.assertAlmostEqual(expr.grad[self.a], -(np.log(3) + 1))

        self.a.value = -1
        self.assertAlmostEqual(expr.grad[self.a], None)

        expr = cp.entr(self.x)
        self.x.value = [3, 4]
        val = np.zeros((2, 2)) + np.diag(-(np.log([3, 4]) + 1))
        self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), val)

        expr = cp.entr(self.x)
        self.x.value = [-1e-9, 4]
        self.assertAlmostEqual(expr.grad[self.x], None)

        expr = cp.entr(self.A)
        self.A.value = [[1, 2], [3, 4]]
        val = np.zeros((4, 4)) + np.diag(-(np.log([1, 2, 3, 4]) + 1))
        self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), val)
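
Note: the expected gradients in this test follow from the entropy atom entr(x) = -x*log(x), whose derivative is -(log(x) + 1); that gives -np.log(2) - 1 and -(np.log(3) + 1) above, and the gradient is None wherever an entry lies outside the domain x > 0.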
Example #6
    def test_lml(self):
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x @ y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        prob = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(prob, [x], [y])

        x_th = jnp.array([1., -1., -1., -1.])
        check_grads(lml, (x_th, ), order=1, modes=['rev'])
Example #7
    def test_lml(self):
        set_seed(1)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        prob = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(prob, [x], [y])

        x_th = torch.DoubleTensor([1., -1., -1., -1.])
        x_th.requires_grad_(True)
        torch.autograd.gradcheck(lml, x_th, eps=1e-5, atol=1e-4, rtol=1e-4)
Example #8
    def test_lml(self) -> None:
        np.random.seed(0)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x @ y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        problem = cp.Problem(cp.Minimize(obj), cons)

        x.value = np.array([1., -1., -1., -1.])
        # TODO(akshayka): This tolerance is too low.
        gradcheck(problem, solve_methods=[s.SCS], atol=1e-2)
        perturbcheck(problem, solve_methods=[s.SCS], atol=1e-4)
Example #9
def sigmoid():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('sigmoid')
    npr.seed(0)

    n = 4
    _x = cp.Parameter((n, 1))
    _y = cp.Variable(n)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y) + cp.entr(1. - _y)))
    prob = cp.Problem(obj)

    _x.value = npr.randn(n, 1)

    prob.solve(solver=cp.SCS)
    print(_y.value)
Example #10
    def __init__(self, temperature=1, formulation='analytical', n_assets=None, max_weight=1):
        super().__init__()

        self.temperature = temperature

        if formulation not in {'analytical', 'variational'}:
            raise ValueError('Unrecognized formulation {}'.format(formulation))

        if formulation == 'variational' and n_assets is None:
            raise ValueError('One needs to provide n_assets for the variational formulation.')

        if formulation == 'analytical' and max_weight != 1:
            raise ValueError('Cannot constrain weights via max_weight for the analytical formulation')

        if formulation == 'variational' and n_assets * max_weight < 1:
            raise ValueError('One cannot create fully invested portfolio with the given max_weight')

        self.formulation = formulation

        if formulation == 'analytical':
            self.layer = torch.nn.Softmax(dim=1)
        else:
            x = cp.Parameter(n_assets)
            w = cp.Variable(n_assets)
            obj = -x @ w - cp.sum(cp.entr(w))
            cons = [cp.sum(w) == 1.,
                    w <= max_weight]
            prob = cp.Problem(cp.Minimize(obj), cons)
            self.layer = CvxpyLayer(prob, [x], [w])
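
Note: with max_weight = 1 the variational problem above (maximize x·w + Σ entr(w) over the simplex) is solved in closed form by the softmax of x, which is why the analytical branch can use torch.nn.Softmax directly; the CvxpyLayer path is only needed once the max_weight cap rules out that closed form.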
Example #11
    def test_dcp_curvature(self):
      expr = 1 + cvx.exp(cvx.Variable())
      self.assertEqual(expr.curvature, s.CONVEX)

      expr = cvx.Parameter()*cvx.NonNegative()
      self.assertEqual(expr.curvature, s.AFFINE)

      f = lambda x: x**2 + x**0.5
      expr = f(cvx.Constant(2))
      self.assertEqual(expr.curvature, s.CONSTANT)

      expr = cvx.exp(cvx.Variable())**2
      self.assertEqual(expr.curvature, s.CONVEX)

      expr = 1 - cvx.sqrt(cvx.Variable())
      self.assertEqual(expr.curvature, s.CONVEX)

      expr = cvx.log(cvx.sqrt(cvx.Variable()))
      self.assertEqual(expr.curvature, s.CONCAVE)

      expr = -(cvx.exp(cvx.Variable()))**2
      self.assertEqual(expr.curvature, s.CONCAVE)

      expr = cvx.log(cvx.exp(cvx.Variable()))
      self.assertEqual(expr.is_dcp(), False)

      expr = cvx.entr(cvx.NonNegative())
      self.assertEqual(expr.curvature, s.CONCAVE)

      expr = ((cvx.Variable()**2)**0.5)**0
      self.assertEqual(expr.curvature, s.CONSTANT)
Example #12
def predict_log_likelihoods(S, R, T, mu0, uptake_met_concs, excreted_met_concs):
    m, n = S.shape
    rxns = S.columns
    mets = S.index
    log_c = cvx.Variable(m)
    log_likelihood = cvx.Variable(n)
    deltaG0 = S.T.dot(mu0)
    log_Q = S.as_matrix().T*log_c
    log_K = cvx.Constant(-1.0/(R*T))*deltaG0
    mu = R*T*log_c + mu0.values

    uptake_met_indices = S.index.get_loc(uptake_met_concs.index)
    excreted_met_indices = S.index.get_loc(excreted_met_concs.index)
    
    obj = cvx.Maximize(cvx.sum_entries(cvx.entr(log_likelihood)))
    constraints = [log_c[uptake_met_indices] == uptake_met_concs.values,
                   log_c[excreted_met_indices] == excreted_met_concs.values,

                   log_likelihood == log_K - log_Q,

                   mu >= np.sum(mu[excreted_met_indices]),
                   mu <= np.sum(mu[uptake_met_indices])
                   ]
    prob = cvx.Problem(obj, constraints)
    prob.solve(verbose=True)
    return pd.DataFrame(log_c.value, index=mets), pd.DataFrame(log_likelihood.value, index=rxns)
Example #13
    def test_entropy_maximization(self):
        set_seed(243)
        n, m, p = 5, 3, 2

        tmp = np.random.rand(n)
        A_np = np.random.randn(m, n)
        b_np = A_np.dot(tmp)
        F_np = np.random.randn(p, n)
        g_np = F_np.dot(tmp) + np.random.rand(p)

        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        F = cp.Parameter((p, n))
        g = cp.Parameter(p)
        obj = cp.Maximize(cp.sum(cp.entr(x)) - .001 * cp.sum_squares(x))
        constraints = [A * x == b, F * x <= g]
        prob = cp.Problem(obj, constraints)
        layer = CvxpyLayer(prob, [A, b, F, g], [x])

        A_tch, b_tch, F_tch, g_tch = map(
            lambda x: torch.from_numpy(x).requires_grad_(True),
            [A_np, b_np, F_np, g_np])
        torch.autograd.gradcheck(
            lambda *x: layer(*x, solver_args={"eps": 1e-10}),
            (A_tch, b_tch, F_tch, g_tch),
            eps=1e-5,
            atol=1e-4,
            rtol=1e-4)
Example #14
    def test_entropy_maximization(self):
        key = random.PRNGKey(0)
        n, m, p = 5, 3, 2

        key, k1, k2, k3, k4 = random.split(key, num=5)
        tmp = random.normal(k1, shape=(n, ))
        A_np = random.normal(k2, shape=(m, n))
        b_np = A_np.dot(tmp)
        F_np = random.normal(k3, shape=(p, n))
        g_np = F_np.dot(tmp) + random.normal(k4, shape=(p, ))

        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        F = cp.Parameter((p, n))
        g = cp.Parameter(p)
        obj = cp.Maximize(cp.sum(cp.entr(x)) - .01 * cp.sum_squares(x))
        constraints = [A @ x == b, F @ x <= g]
        prob = cp.Problem(obj, constraints)
        layer = CvxpyLayer(prob, [A, b, F, g], [x])

        A_jax, b_jax, F_jax, g_jax = map(lambda x: jnp.array(x),
                                         [A_np, b_np, F_np, g_np])

        check_grads(layer, (A_jax, b_jax, F_jax, g_jax),
                    order=1,
                    modes=['rev'])
Example #15
    def test_dcp_curvature(self):
        expr = 1 + cvx.exp(cvx.Variable())
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.Parameter()*cvx.Variable(nonneg=True)
        self.assertEqual(expr.curvature, s.AFFINE)

        f = lambda x: x**2 + x**0.5  # noqa E731
        expr = f(cvx.Constant(2))
        self.assertEqual(expr.curvature, s.CONSTANT)

        expr = cvx.exp(cvx.Variable())**2
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = 1 - cvx.sqrt(cvx.Variable())
        self.assertEqual(expr.curvature, s.CONVEX)

        expr = cvx.log(cvx.sqrt(cvx.Variable()))
        self.assertEqual(expr.curvature, s.CONCAVE)

        expr = -(cvx.exp(cvx.Variable()))**2
        self.assertEqual(expr.curvature, s.CONCAVE)

        expr = cvx.log(cvx.exp(cvx.Variable()))
        self.assertEqual(expr.is_dcp(), False)

        expr = cvx.entr(cvx.Variable(nonneg=True))
        self.assertEqual(expr.curvature, s.CONCAVE)

        expr = ((cvx.Variable()**2)**0.5)**0
        self.assertEqual(expr.curvature, s.CONSTANT)
Example #16
    def test_entropy_maximization(self) -> None:
        np.random.seed(0)
        n, m, p = 5, 3, 2

        tmp = np.random.rand(n)
        A_np = np.random.randn(m, n)
        b_np = A_np.dot(tmp)
        F_np = np.random.randn(p, n)
        g_np = F_np.dot(tmp) + np.random.rand(p)

        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        F = cp.Parameter((p, n))
        g = cp.Parameter(p)
        obj = cp.Maximize(cp.sum(cp.entr(x)) - cp.sum_squares(x))
        constraints = [A @ x == b, F @ x <= g]
        problem = cp.Problem(obj, constraints)
        A.value = A_np
        b.value = b_np
        F.value = F_np
        g.value = g_np
        gradcheck(problem,
                  solve_methods=[s.SCS],
                  atol=1e-2,
                  eps=1e-8,
                  max_iters=10_000)
        perturbcheck(problem, solve_methods=[s.SCS], atol=1e-4)
Example #17
def findWeightsLP(data=None):
    no_of_queries_lp = dbutils.DBUtils.no_of_queries_lp()
    support_count = dbutils.DBUtils.no_of_elements_in_support_set()
    if data is None:
        data = Utils.PricerUtils.disagreementMatrixLP(dbutils.DBUtils.cursor,
                                                      no_of_queries_lp,
                                                      support_count)
    for i in range(0, support_count):
        data[i][0] = 1
    inp = np.asarray(data)
    inp = np.delete(inp, 0, 1)
    x = cvx.Variable(1, support_count + 1)
    A = inp
    c = np.asarray([table_country.c])
    obj = cvx.Maximize(cvx.sum_entries(cvx.entr(x)))
    constraints = [x * A == c]
    prob = cvx.Problem(obj, constraints)
    prob.solve(solver=cvx.SCS, verbose=True, max_iters=10000)
    if prob.status == cvx.INFEASIBLE:
        return None
    weights = []
    for i in np.nditer(x.value, flags=['refs_ok']):
        weights.append(i * 100)
    print "size - ", x.size
    with open('weights9999.txt', 'wb') as f:
        pickle.dump(weights, f)
    return weights
Example #18
def balance_cvx(hh_table, A, w, mu=None, verbose_solver=False):
    """Maximum Entropy allocaion method for a single unit

    Args:
        hh_table (numpy matrix): Table of households categorical data
        A (numpy matrix): Area marginals (controls)
        w (numpy array): Initial household allocation weights
        mu (numpy array): Importance weights of marginals fit accuracy
        verbose_solver (boolean): Provide detailed solver info

    Returns:
        (numpy matrix, numpy matrix): Household weights, relaxation factors
    """

    n_samples, n_controls = hh_table.shape
    x = cvx.Variable(n_samples)

    if mu is None:
        objective = cvx.Maximize(
            cvx.sum_entries(cvx.entr(x) + cvx.mul_elemwise(cvx.log(w.T), x)))

        constraints = [
            x >= 0,
            x.T * hh_table == A,
        ]
        prob = cvx.Problem(objective, constraints)
        prob.solve(solver=cvx.SCS, verbose=verbose_solver)

        return x.value

    else:
        # With relaxation factors
        z = cvx.Variable(n_controls)

        objective = cvx.Maximize(
            cvx.sum_entries(cvx.entr(x) + cvx.mul_elemwise(cvx.log(w.T), x)) +
            cvx.sum_entries(mu * (cvx.entr(z))))

        constraints = [
            x >= 0,
            z >= 0,
            x.T * hh_table == cvx.mul_elemwise(A, z.T),
        ]
        prob = cvx.Problem(objective, constraints)
        prob.solve(solver=cvx.SCS, verbose=verbose_solver)

        return x.value, z.value
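
Note: since entr(x) = -x*log(x), each term entr(x_i) + x_i*log(w_i) equals -x_i*log(x_i / w_i), so the objective maximizes the negative relative entropy between the fitted weights x and the initial weights w, keeping x as close to w as possible (in the KL sense) while x.T * hh_table matches the marginals A; the mu-weighted entr(z) term in the relaxed variant allows each marginal to be scaled by a relaxation factor at an entropic cost.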
Example #19
def clean_dictionary(phrase_file):
    lexicon = pt.getPhraseEntriesFromTable(phrase_file)
    lexicon = filter(pt.filterLex, lexicon)
    entries = list((entry['srcphrase'], entry['tgtphrase'], \
        entry['probValues'][0], entry['probValues'][1], \
        entry['probValues'][2], entry['probValues'][3]) \
        for entry in lexicon)

    # Make it completely random. Which two distributions we choose to work with
    #direction = True if np.random.random() <= 0.5 else False;
    direction = True
    if direction:
        #srctotgt
        pprobs = np.asarray([X[2] for X in entries])
        lprobs = np.asarray([X[4] for X in entries])
        vocab = set(X[0] for X in entries)
        index = 0
    else:
        #tgttosrc
        pprobs = np.asarray([X[3] for X in entries])
        lprobs = np.asarray([X[5] for X in entries])
        vocab = set(X[1] for X in entries)
        index = 1

    vocab = sorted(list(vocab))
    vocab = dict((phrase, idx) for idx, phrase in enumerate(vocab))
    groups = sparse.dok_matrix((len(vocab), len(entries)), dtype=float)
    for idx, entry in enumerate(entries):
        groups[vocab[entry[index]], idx] = 1
    groups = groups.tocsc()

    sparse_dists = convex_cleanup(pprobs, lprobs, groups)
    global_sol = None
    global_entropy = -100
    for dist in sparse_dists:
        solution = dist.value
        entropy = cvx.sum_entries(cvx.entr(solution)).value
        if entropy > global_entropy:
            global_sol = solution
            global_entropy = entropy
        print(np.count_nonzero(solution),
              np.min(solution),
              np.max(solution),
              entropy,
              file=stderr)
        #solution = list(solution.getA1());

    global_sol = list(global_sol.getA1())
    groups = groups.todok()
    pruned_dictionary = ("%s\t%s\t%.4f" %(entries[key[1]][0], \
        entries[key[1]][1], \
        prob) \
        for key, prob in zip(sorted(groups.keys()), global_sol))

    random_utils.lines_to_file('', pruned_dictionary)

    return
Example #20
 def test_entr(self):
     """Test a problem with entr.
     """
     for n in [5, 10, 25]:
         print(n)
         x = cp.Variable(n)
         obj = cp.Maximize(cp.sum(cp.entr(x)))
         p = cp.Problem(obj, [cp.sum(x) == 1])
         p.solve(solver=cp.SCS)
         self.assertItemsAlmostEqual(x.value, n*[1./n])
Example #21
 def test_entr(self):
     """Test a problem with entr.
     """
     if cvx.SUPER_SCS in cvx.installed_solvers():
         for n in [5, 10, 25]:
             print(n)
             x = cvx.Variable(n)
             obj = cvx.Maximize(cvx.sum(cvx.entr(x)))
             p = cvx.Problem(obj, [cvx.sum(x) == 1])
             p.solve(solver='SUPER_SCS', verbose=True)
             self.assertItemsAlmostEqual(x.value, n*[1./n])
Example #22
 def __init__(self, nb_measure, alpha=0.95):
     self.alpha = alpha
     self.z = cp.Variable(nb_measure, nonneg=True)
     self.p = cp.Parameter(nb_measure, nonneg=True)
     self.v = cp.Parameter(nb_measure)
     dkl = cp.matmul(self.p, -cp.entr(self.z))
     objective = cp.Maximize(cp.matmul(self.p, cp.multiply(self.v, self.z)))
     constraints = [
         dkl <= np.log(1 / (1 - self.alpha)),
         cp.matmul(self.p, self.z) == 1
     ]
     self.problem = cp.Problem(objective, constraints)
Example #23
 def test_entr_prob(self):
     """Test a problem with entr.
     """
     for n in [5, 10, 25]:
         print(n)
         x = cvx.Variable(n)
         obj = cvx.Maximize(cvx.sum(cvx.entr(x)))
         p = cvx.Problem(obj, [cvx.sum(x) == 1])
         p.solve(solver=cvx.ECOS, verbose=True)
         self.assertItemsAlmostEqual(x.value, n * [1. / n])
         p.solve(solver=cvx.SCS, verbose=True)
         self.assertItemsAlmostEqual(x.value, n * [1. / n], places=3)
Example #24
    def test_lml(self):
        tf.random.set_seed(0)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        problem = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(problem, [x], [y])
        x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)

        with tf.GradientTape() as tape:
            y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
            loss = -tf.math.log(y_opt[1])

        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return -np.log(y.value[1])

        grad = tape.gradient(loss, [x_tf])
        numgrad = numerical_grad(f, [x], [x_tf])
        np.testing.assert_almost_equal(grad, numgrad, decimal=3)
Example #25
def cvar(v, lam, alpha):
    m = v.shape[0]
    p = cp.Variable(m, nonneg=True)
    obj = v @ p + lam * cp.sum(cp.entr(p)) - lam * np.log(m)

    constraints = [
        cp.max(p) <= 1.0 / (alpha * m),
        cp.sum(p) == 1,
    ]

    problem = cp.Problem(cp.Maximize(obj), constraints)
    problem.solve(solver=cp.MOSEK)
    return p.value
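
An illustrative call for the cvar helper above; the inputs are made up, and as written the solve requires a MOSEK license (SCS or ECOS could be swapped in for the exponential-cone constraints if MOSEK is unavailable).

import numpy as np

v = np.random.randn(50)                # made-up sample losses/returns
p_star = cvar(v, lam=0.1, alpha=0.05)  # tilted distribution, entries capped at 1/(alpha*m) = 0.4
print(p_star.sum(), p_star.max())
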
Example #26
def get_distribution(N):
    #setting up versus array
    partitions = pg.partition_generator(N, 5, 0, N)
    num = len(partitions)
    cn = [[0] * num for i in range(num)]

    for i in range(num):
        for j in range(num):
            cn[i][j] = sim.get_probability(partitions[j], partitions[i])

    #setting up cvxpy
    dist = [0] * num
    for i in range(num):
        dist[i] = cp.Variable()
    objective = cp.Maximize(
        sum(cp.entr(dist[i])
            for i in range(len(partitions))))  #maximize entropy
    constraint1 = [
        sum([dist[j] * cn[i][j] for j in range(num)]) >= 0.5
        for i in range(num)
    ]
    constraint2 = [dist[i] >= 0 for i in range(num)]
    constraint3 = [sum(dist) == 1]
    problem = cp.Problem(objective, constraint1 + constraint2 + constraint3)
    problem.solve(solver=cp.ECOS)
    #printing distribution
    print("Entropy: ", problem.value)
    print("Ratio entropy/log(n): ", problem.value / np.log(num))
    print("Probability distribution: ")
    distribution_list = []
    for i in range(num):
        temp = float(dist[i].value)
        distribution_list.append(
            (max(0, round(temp, DIGITS_PRECISION)), partitions[i]))
    distribution_list.sort(reverse=True)
    counter = 0
    for a in distribution_list:
        counter = counter + 1
        print(counter, ". ", a[1], " p= ", a[0], sep='')

    print("Responses by Player Two --> Player One Probability of Win")
    response_list = []
    for i in range(num):
        temp = 0
        for j in range(num):
            temp = temp + float(dist[j].value) * cn[i][j]
        response_list.append((round(temp, 3), partitions[i]))

    response_list.sort()
    for a in response_list:
        print(a[1], " ---> ", a[0])
Example #27
    def test_partial_problem(self) -> None:
        """Test grad for partial minimization/maximization problems.
        """
        for obj in [Minimize((self.a)**-1), Maximize(cp.entr(self.a))]:
            prob = Problem(obj, [self.x + self.a >= [5, 8]])
            # Optimize over nothing.
            expr = partial_optimize(prob,
                                    dont_opt_vars=[self.x, self.a],
                                    solver=cp.ECOS)
            self.a.value = None
            self.x.value = None
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], None)
            self.assertAlmostEqual(grad[self.x], None)
            # Outside domain.
            self.a.value = 1.0
            self.x.value = [5, 5]
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], None)
            self.assertAlmostEqual(grad[self.x], None)

            self.a.value = 1
            self.x.value = [10, 10]
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a])
            self.assertItemsAlmostEqual(grad[self.x].toarray(), [0, 0, 0, 0])

            # Optimize over x.
            expr = partial_optimize(prob, opt_vars=[self.x], solver=cp.ECOS)
            self.a.value = 1
            grad = expr.grad
            self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a] + 0)

            # Optimize over a.
            fix_prob = Problem(obj, [self.x + self.a >= [5, 8], self.x == 0])
            fix_prob.solve(solver=cp.ECOS)
            dual_val = fix_prob.constraints[0].dual_variables[0].value
            expr = partial_optimize(prob, opt_vars=[self.a], solver=cp.ECOS)
            self.x.value = [0, 0]
            grad = expr.grad
            self.assertItemsAlmostEqual(grad[self.x].toarray(), dual_val)

            # Optimize over x and a.
            expr = partial_optimize(prob,
                                    opt_vars=[self.x, self.a],
                                    solver=cp.ECOS)
            grad = expr.grad
            self.assertAlmostEqual(grad, {})
Example #28
def softmax():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('softmax')
    npr.seed(0)

    d = 4
    _x = cp.Parameter((d, 1))
    _y = cp.Variable(d)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y)))
    cons = [sum(_y) == 1.]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(d, 1)

    prob.solve(solver=cp.SCS)
    print(_y.value)
Example #29
def channel_capacity(n, m, sum_x: float = 1.0):
    '''
Boyd and Vandenberghe, Convex Optimization, exercise 4.57 page 207
Capacity of a communication channel.

We consider a communication channel with input X(t)∈{1,...,n} and
output Y(t)∈{1,...,m}, for t=1,2,.... The relation between the
input and output is given statistically:
p_(i,j) = ℙ(Y(t)=i|X(t)=j), i=1,...,m, j=1,...,n
The matrix P ∈ ℝ^(m×n) is called the channel transition matrix, and
the channel is called a discrete memoryless channel. Assume X has a
probability distribution denoted x ∈ ℝ^n, i.e.,
x_j = ℙ(X=j), j=1,...,n
The mutual information between X and Y is given by
I(X;Y) = ∑_i ∑_j x_j p_(i,j) log_2(p_(i,j)/∑_k x_k p_(i,k))
Then the channel capacity C is given by
C = sup I(X;Y).
With the variable change y = Px this becomes
I(X;Y) = c^T x - ∑_i y_i log_2(y_i)
where c_j = ∑_i p_(i,j) log_2(p_(i,j))
  '''
    # n is the number of different input values
    # m is the number of different output values
    if n * m == 0:
        print(
            'The range of both input and output values must be greater than zero'
        )
        return 'failed', np.nan, np.nan
    # P is the channel transition matrix
    P = np.ones((m, n))
    # x is probability distribution of the input signal X(t)
    x = cvx.Variable(n)
    # y is the probability distribution of the output signal Y(t)
    y = P @ x
    # I is the mutual information between x and y
    c = np.sum(P * np.log2(P), axis=0)
    I = c @ x + cvx.sum(cvx.entr(y))
    # Channel capacity maximised by maximising the mutual information
    obj = cvx.Minimize(-I)
    constraints = [cvx.sum(x) == sum_x, x >= 0]
    # Form and solve problem
    prob = cvx.Problem(obj, constraints)
    prob.solve()
    if prob.status == 'optimal':
        return prob.status, prob.value, x.value
    else:
        return prob.status, np.nan, np.nan
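
A hypothetical call; P is hard-coded above as an all-ones placeholder (a degenerate channel), so for a meaningful capacity it would be replaced by the actual transition matrix before trusting the returned value.

status, capacity, x_opt = channel_capacity(n=3, m=2)
print(status, capacity)
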
Example #30
def approxBalATE(X, T, Y, tol_vals, objective):
    # weight the control and treated units so that they match the whole population

    n_ctrl = np.sum(1 - T)  # number of controls
    n_trt = np.sum(T)
    n_cov = X.shape[1]  # number of covariates

    tol = cp.Parameter()

    # weight the control units to match the population
    w_c = cp.Variable(n_ctrl)
    if objective == "l1":
        obj_c = cp.Minimize(cp.norm((w_c - 1 / n_ctrl), 1))
    elif objective == "l2":
        obj_c = cp.Minimize(cp.sum_squares(w_c - 1 / n_ctrl))
    elif objective == "entropy":
        obj_c = cp.Minimize(-cp.sum(cp.entr(w_c)))
    else:
        print("invalid objective")
        # return -1

    constraints_c = [cp.sum(w_c) == 1]

    constraints_c += [0 <= w_c]
    for i in range(n_cov):
        constraints_c += [X[T==0][:,i]*w_c - \
                        np.mean(X[:,i]) <= \
                        tol * X[:,i].std()]
        constraints_c += [np.mean(X[:,i]) -\
                        X[T==0][:,i]*w_c <=\
                        tol * X[:,i].std()]
    prob_c = cp.Problem(obj_c, constraints_c)

    w_c_vals = []
    for tol_val in tol_vals:
        tol.value = tol_val
        try:
            result_c = prob_c.solve()
            if w_c.value is None:
                w_c.value = -1. * np.ones(n_ctrl)
            w_c_vals.append(w_c.value)
        except SolverError:
            w_c_vals.append(-1. * np.ones(n_ctrl))

    return w_c_vals
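
Note: in the "entropy" branch the objective -Σ entr(w_c) = Σ w_c*log(w_c) is the negative entropy of the weights, so minimizing it pushes the control weights toward the uniform 1/n_ctrl baseline while each tolerance constraint keeps the weighted covariate mean within tol standard deviations of the population mean.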
Example #31
 def solve(self):
     if self.solver == "lp":
         xsol = np.linalg.lstsq(self.loc_moments, self.moments)[0]
         self.values = xsol
     else:
         # Moment values of the boundaries
         Xs = cvx.Variable(self.resolution)
         constraints = [
             Xs >= 0, Xs <= 1.0, self.loc_moments * Xs == self.moments
         ]
         if self.solver == "mindensity":
             o = cvx.Minimize(cvx.max_entries(Xs))
         else:
             o = cvx.Maximize(cvx.sum_entries(cvx.entr(Xs)))
         prob = cvx.Problem(o, constraints)
         sol = prob.solve(solver=cvx.ECOS)
         self.values = Xs.value
     return self.values * 1000
Example #32
def calculate_weight(updateList, df, queryList, pointList):
    """
    Main idea:
        given a list of database instance (updateList), a bundle of queries and corresponding price points, calculate the weight
    for each instance in the support set (updateList)

    Args:
        updateList
        queryList, 用户指定
        pointList, 用户指定
        df, original database instance
    Return:
        a list of weights for each instance in the updateList
    """

    n = len(updateList)
    # W type is Variable, which is an inner type defined in cvxpy
    # W.value type is numpy matrix
    W = cv.Variable(n)

    cost = sum(cv.entr(W))
    obj = cv.Maximize(cost)

    # form the constraints
    constraints = []

    # the whole-database price does not need to enter as a price point, because some parts are private and have no price
    #     constraints.append(sum([W[i] for i in range(n)]) == databasePoint)
    constraints.append(0 <= W)

    constraintList = construct_constraints(updateList, df, queryList, pointList)
    for constraint in constraintList:
        index = constraint[0]
        value = constraint[1]
        if index is not None:
            constraints.append(sum([W[i] for i in index]) == value)

    # Form and solve problem.
    prob = cv.Problem(obj, constraints)
    prob.solve()  # Returns the optimal value.
    # print("status:", prob.status)
    # print("optimal value", prob.value)
    # print("optimal var", W.value)
    return W.value
Example #33
File: prox_test.py  Project: silky/epsilon
 prox("SECOND_ORDER_CONE", None, C_soc_scaled),
 prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
 prox("SECOND_ORDER_CONE", None, C_soc_translated),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm(X, "fro") <= t]),
 prox("SECOND_ORDER_CONE", None, lambda: [cp.norm2(x) <= t]),
 prox("SEMIDEFINITE", None, lambda: [X >> 0]),
 prox("SUM_DEADZONE", f_dead_zone),
 prox("SUM_EXP", lambda: cp.sum_entries(cp.exp(x))),
 prox("SUM_HINGE", f_hinge),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
 prox("SUM_INV_POS", lambda: cp.sum_entries(cp.inv_pos(x))),
 prox("SUM_KL_DIV", lambda: cp.sum_entries(cp.kl_div(p1,q1))),
 prox("SUM_LARGEST", lambda: cp.sum_largest(x, 4)),
 prox("SUM_LOGISTIC", lambda: cp.sum_entries(cp.logistic(x))),
 prox("SUM_NEG_ENTR", lambda: cp.sum_entries(-cp.entr(x))),
 prox("SUM_NEG_LOG", lambda: cp.sum_entries(-cp.log(x))),
 prox("SUM_QUANTILE", f_quantile),
 prox("SUM_QUANTILE", f_quantile_elemwise),
 prox("SUM_SQUARE", f_least_squares_matrix),
 prox("SUM_SQUARE", lambda: f_least_squares(20)),
 prox("SUM_SQUARE", lambda: f_least_squares(5)),
 prox("SUM_SQUARE", f_quad_form),
 prox("TOTAL_VARIATION_1D", lambda: cp.tv(x)),
 prox("ZERO", None, C_linear_equality),
 prox("ZERO", None, C_linear_equality_matrix_lhs),
 prox("ZERO", None, C_linear_equality_matrix_rhs),
 prox("ZERO", None, C_linear_equality_multivariate),
 prox("ZERO", None, C_linear_equality_multivariate2),
 prox("ZERO", None, lambda: C_linear_equality_graph(20)),
 prox("ZERO", None, lambda: C_linear_equality_graph(5)),
Example #34
 def maximum_entropy(self, assumptions, **kwargs):
     prob = cvxpy.Problem(
         cvxpy.Maximize(cvxpy.sum_entries(cvxpy.entr(self._cvxpy_var))),
         assumptions + [cvxpy.sum_entries(self._cvxpy_var) == 1] + [self._cvxpy_var >= 0],
     )
     prob.solve(**kwargs)