Example #1
    def test(self):
        np.random.seed(0)
        cvx_problem = problem_gen()

        problem = TestTensorProblem(TensorProblem(cvx_problem))
        data = cvx_problem.get_problem_data(cvx.SCS)

        # Compare with manually implemented SCS iterations
        m, n = data["A"].shape
        u0 = np.zeros((m + n + 1, 1))
        v0 = np.zeros((m + n + 1, 1))
        u0[-1] = 1
        v0[-1] = 1

        # variables
        u, v = scs_tf.create_variables(problem)
        cache = scs_tf.create_cache(problem)
        counters = scs_tf.create_counters()

        # ops
        init_op = tf.initialize_all_variables()
        init_cache_op = scs_tf.init_cache(problem, cache)
        iterate_op = scs_tf.iterate(problem, u, v, cache, counters)
        residuals = scs_tf.compute_residuals(problem, u, v)

        # Run two iterations
        u_vec = vstack([u.x, u.y, u.tau])
        v_vec = vstack([v.r, v.s, v.kappa])

        with self.test_session():
            init_op.run()
            init_cache_op.run()

            tf.logging.info("initialization")
            assert_allclose(u0, u_vec.eval())
            assert_allclose(v0, v_vec.eval())

            tf.logging.info("first iteration")
            iterate_op.run()
            u_tilde0 = expected_subspace_projection(data, u0 + v0)
            u0 = expected_cone_projection(data, u_tilde0 - v0)
            v0 = v0 - u_tilde0 + u0
            assert_allclose(u0, u_vec.eval(), rtol=0, atol=1e-4)
            assert_allclose(v0, v_vec.eval(), rtol=0, atol=1e-4)

            u0 = u_vec.eval()
            v0 = v_vec.eval()

            tf.logging.info("second iteration")
            iterate_op.run()
            u_tilde0 = expected_subspace_projection(data, u0 + v0)
            u0 = expected_cone_projection(data, u_tilde0 - v0)
            v0 = v0 - u_tilde0 + u0
            assert_allclose(u0, u_vec.eval(), rtol=0, atol=1e-2)
            assert_allclose(v0, v_vec.eval(), rtol=0, atol=1e-2)
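Each iterate_op step mirrors the reference computation in the assertions: a linear (subspace) projection of u + v, a cone projection of u_tilde - v, and the dual update v = v - u_tilde + u. Below is a minimal NumPy sketch of that splitting loop, with a toy skew-symmetric operator and a nonnegative-orthant projection standing in for the expected_subspace_projection and expected_cone_projection helpers, whose actual definitions are not shown on this page:

import numpy as np

def subspace_projection(Q, w):
    # SCS's linear step amounts to solving (I + Q) u_tilde = u + v.
    return np.linalg.solve(np.eye(Q.shape[0]) + Q, w)

def cone_projection(w):
    # Nonnegative orthant, standing in for the product cone used by SCS.
    return np.maximum(w, 0)

n = 4
Q = np.random.randn(n, n)
Q = Q - Q.T                      # skew-symmetric, so I + Q is invertible
u = np.zeros((n, 1)); u[-1] = 1
v = np.zeros((n, 1)); v[-1] = 1

for _ in range(2):
    u_tilde = subspace_projection(Q, u + v)
    u = cone_projection(u_tilde - v)
    v = v - u_tilde + u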
Example #2
def A(self, x):
    # Split the stacked vector x into one matrix per variable.
    xs = {}
    for var_id, var_size in self.sym_data.var_sizes.items():
        var_offset = self.sym_data.var_offsets[var_id]
        idx = slice(var_offset, var_offset + var_size[0] * var_size[1])
        xs[var_id] = mat(x[idx, :], var_size)
    # Apply each constraint's linear expression and stack the results.
    return vstack([vec(cvxpy_expr.tensor(Ai, xs)) for Ai in self.A_exprs])
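These examples rely on small tensor helpers (vstack, vec, mat) whose definitions are not shown on this page. A minimal sketch of what they might look like, assuming a column-vector convention and the tf.concat(values, axis) signature of TF 1.x and later; the project's real helpers may differ, for example in how column- versus row-major ordering is handled:

import tensorflow as tf

def vstack(tensors):
    # Stack matrices / column vectors vertically along axis 0.
    return tf.concat(tensors, axis=0)

def vec(x):
    # Flatten a matrix into a single column vector.
    return tf.reshape(x, (-1, 1))

def mat(x, size):
    # Reshape a column vector back into a (rows, cols) matrix.
    return tf.reshape(x, size)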
Example #3
def get_constraint_tensors(constraints):
    """Get expression for Ax + b."""
    A_exprs = [constr.expr for constr in tree_mat.prune_constants(constraints)]
    b = vstack(
        [vec(tf.constant(-tree_mat.mul(constr.expr, {}), dtype=tf.float32))
         for constr in constraints])
    return A_exprs, b
Example #4
def proj_cone(cone_slices, x, dual=False):
    ys = []
    prefix = "proj_dual_" if dual else "proj_"
    for cone, idx in cone_slices:
        # Look up the projection for this cone type, e.g. proj_second_order.
        proj = globals()[prefix + cone]
        ys.append(proj(x[idx, :]))

    return vstack(ys)
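For reference, cone_slices pairs a cone name with the slice of rows of x belonging to that cone block, and the name selects proj_<name> (or proj_dual_<name>) via globals(). A hypothetical value, using only the second_order cone defined in the next example (the project's other cone names are not shown here):

cone_slices = [
    ("second_order", slice(0, 3)),  # rows 0..2 form a 3-dimensional SOC block
    ("second_order", slice(3, 7)),  # rows 3..6 form a 4-dimensional SOC block
]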
Example #5
def proj_second_order(x):
    # Split x = (s, v) into its scalar and vector parts.
    s, v = x[:1, :], x[1:, :]
    norm_v = norm(v)
    s = tf.squeeze(s)
    return tf.case(
        ((norm_v <= -s, lambda: tf.zeros_like(x)),  # in the polar cone
         (norm_v <= s, lambda: x)),                 # already in the cone
        # Otherwise scale (norm_v, v) onto the cone boundary.
        lambda: 0.5 * (1 + s / norm_v) * vstack([tf.reshape(norm_v, (1, 1)), v]))
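As a plain NumPy reference for the three tf.case branches above (not part of the project, just the textbook second-order cone projection):

import numpy as np

def proj_second_order_np(x):
    s, v = x[0, 0], x[1:, :]
    norm_v = np.linalg.norm(v)
    if norm_v <= -s:   # x lies in the polar cone: project to the origin
        return np.zeros_like(x)
    if norm_v <= s:    # x is already inside the cone: leave it unchanged
        return x
    # Otherwise scale (norm_v, v) onto the cone boundary.
    return 0.5 * (1 + s / norm_v) * np.vstack([np.array([[norm_v]]), v])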
Example #6
def AT(self, y):
    # Split the stacked dual vector y into one matrix per constraint.
    ys = []
    offset = 0
    for constr in self.constraints:
        idx = slice(offset, offset + constr.size[0] * constr.size[1])
        ys.append(mat(y[idx, :], constr.size))
        offset += constr.size[0] * constr.size[1]
    # Apply each adjoint operator and sum the per-variable contributions.
    x_map = sum_dicts(cvxpy_expr.adjoint_tensor(Ai, ys[i])
                      for i, Ai in enumerate(self.A_exprs))
    return vstack([vec(x_map[var_id]) for var_id in self.var_ids])
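A and AT are intended to be adjoint linear maps, so an implementation of this pair can be sanity-checked with the inner-product identity <Ax, y> = <x, ATy>. A quick illustration with a plain dense matrix rather than the expression-tree operators used here:

import numpy as np

A = np.random.randn(5, 3)
x = np.random.randn(3, 1)
y = np.random.randn(5, 1)
# <A x, y> and <x, A^T y> agree for a true adjoint pair.
assert np.isclose((A @ x).T @ y, x.T @ (A.T @ y))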
Example #7
def get_objective_tensor(var_ids, sym_data):
    """Get objective tensor via gradient of c'x."""
    xs = [tf.Variable(tf.zeros(sym_data.var_sizes[var_id], dtype=tf.float32))
          for var_id in var_ids]
    xs_map = dict(zip(var_ids, xs))
    obj_t = cvxpy_expr.tensor(sym_data.objective, xs_map)

    # Gradient of the (linear) objective; variables that do not appear in it
    # come back as None and are replaced with zero blocks of the right size.
    return vstack([
        vec(ci) if ci is not None
        else vec(tf.zeros(sym_data.var_sizes[var_ids[i]], dtype=tf.float32))
        for i, ci in enumerate(tf.gradients(obj_t, xs))])
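The gradient trick works because the canonicalized objective is linear: tf.gradients of c'x with respect to x returns c itself, and variables absent from the objective come back as None, hence the zero-fill above. A small sketch in the same TF 1.x graph-mode style as these examples (the values are hypothetical, not the project's code):

import numpy as np
import tensorflow as tf

c = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
x = tf.Variable(tf.zeros((3, 1), dtype=tf.float32))
unused = tf.Variable(tf.zeros((2, 1), dtype=tf.float32))
obj = tf.reduce_sum(c * x)               # the linear form c'x
grads = tf.gradients(obj, [x, unused])   # grads[0] evaluates to c; grads[1] is None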