Example #1
def easy_lp_problem():
    scale = 1

    # ">=" rows are stored as "-a.x <= -b" to fit the LE (<=) form
    LE_A = -1 * np.array([
        [1, 0, 0, 1],  # x0 + x3 >= 1
        [0, 1, 0, 1],  # x1 + x3 >= 1
        [1, 0, 0, 0],  # x0 >= 0
        [0, 1, 0, 0],  # x1 >= 0
        [0, 0, 1, 0],  # x2 >= 0
        [0, 0, 0, 1],  # x3 >= 0
    ]) * scale

    LE_B = -1 * np.array([1, 1, 0, 0, 0, 0]) * scale

    EQ_A = np.array([
        [1, 1, 0, 0],  # x0 + x1 == 1
        [0, 1, 1, 0],  # x1 + x2 == 1
    ]) * scale

    EQ_B = np.array([1, 1]) * scale

    OBJ_C = np.array([2, 3, 1, 5]) * scale

    _vars = ["x_%s" % i for i in range(4)]
    return Problem.from_numpy(_vars, (None, OBJ_C, None), (LE_A, LE_B),
                              (EQ_A, EQ_B), torch.device("cpu"))
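All of these generators share the same sign convention: a ">=" constraint is negated on both sides so it fits the single "LE_A x <= LE_B" form that Problem.from_numpy appears to expect. A minimal numpy sketch of that convention, checked at the hand-picked feasible point x = (0.5, 0.5, 0.5, 0.5):

import numpy as np

LE_A = -np.array([
    [1, 0, 0, 1],  # x0 + x3 >= 1
    [0, 1, 0, 1],  # x1 + x3 >= 1
    [1, 0, 0, 0],  # x0 >= 0
    [0, 1, 0, 0],  # x1 >= 0
    [0, 0, 1, 0],  # x2 >= 0
    [0, 0, 0, 1],  # x3 >= 0
])
LE_B = -np.array([1, 1, 0, 0, 0, 0])
EQ_A = np.array([[1, 1, 0, 0], [0, 1, 1, 0]])
EQ_B = np.array([1, 1])

x = np.array([0.5, 0.5, 0.5, 0.5])  # hand-picked feasible point
assert np.all(LE_A @ x <= LE_B)     # every ">=" row holds in "<=" form
assert np.allclose(EQ_A @ x, EQ_B)  # both equality rows hold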
Example #2
def random_benchmark_problem():
    import numpy as np
    np.random.seed(42)

    N = 400
    M1 = 800
    R = 40

    A1 = np.zeros((M1, N))

    # each covering row gets R random positive coefficients
    for i in range(M1):
        indices = np.random.choice(N, size=R, replace=False)
        A1[i, indices] = np.abs(np.random.randn(R))

    # negate so that "a.x >= 1" and "x >= 0" become "<=" rows
    A1 = -1 * A1
    A2 = -np.eye(N)

    A = np.concatenate([A1, A2], axis=0)

    B1 = -1 * np.ones(M1)
    B2 = np.zeros(N)

    B = np.concatenate([B1, B2])

    c = np.ones(N)  # objective coefficients

    _vars = ["x_%s" % i for i in range(N)]

    p = Problem.from_numpy(_vars, (None, c, None), (A, B), None,
                           torch.device("cpu"))
    return p
Example #3
def bad_le_lp_problem():
    LE_A = -1 * torch.DoubleTensor([
        [-1, -1, 0, -1],  # x0 + x1 + x3 <= 0.5
        [1, 0, 0, 1],     # x0 + x3 >= 1
        [0, 1, 0, 1],     # x1 + x3 >= 1
        [1, 0, 0, 0],     # x0 >= 0
        [0, 1, 0, 0],     # x1 >= 0
        [0, 0, 1, 0],     # x2 >= 0
        [0, 0, 0, 1],     # x3 >= 0
    ])

    LE_B = -1 * torch.DoubleTensor([-0.5, 1, 1, 0, 0, 0, 0])

    EQ_A = torch.DoubleTensor([
        [1, 1, 0, 0],  # x0 + x1 == 1
        [0, 1, 1, 0],  # x1 + x2 == 1
    ])

    EQ_B = torch.DoubleTensor([1, 1])

    OBJ_C = torch.DoubleTensor([2, 3, 1, 5])
    _vars = ["x_%s" % i for i in range(4)]

    return Problem.from_numpy(_vars, (None, OBJ_C, None), (LE_A, LE_B),
                              (EQ_A, EQ_B), torch.device("cpu"))
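Why bad_le_lp_problem is infeasible: the first row forces x0 + x1 + x3 <= 0.5, while x0 + x3 >= 1 together with x1 >= 0 force x0 + x1 + x3 >= 1. A cross-check with scipy (purely illustrative, not part of jet20) confirms this:

import numpy as np
from scipy.optimize import linprog

A_ub = -np.array([
    [-1, -1, 0, -1],  # x0 + x1 + x3 <= 0.5
    [1, 0, 0, 1],     # x0 + x3 >= 1
    [0, 1, 0, 1],     # x1 + x3 >= 1
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
b_ub = -np.array([-0.5, 1, 1, 0, 0, 0, 0])
A_eq = np.array([[1, 1, 0, 0], [0, 1, 1, 0]])
b_eq = np.array([1, 1])
c = np.array([2, 3, 1, 5])

res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
              bounds=(None, None))
print(res.status, res.message)  # status 2: problem is infeasible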
Example #4
def bad_eq_lp_problem():
    LE_A = -1 * torch.DoubleTensor([
        [1, 0, 0, 1],  # >= 1
        [0, 1, 0, 1],  # >= 1
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])

    LE_B = -1 * torch.DoubleTensor([1, 1, 0, 0, 0, 0])

    EQ_A = torch.DoubleTensor([
        [1, 1, 0, 0],  # x0 + x1 == 1
        [1, 1, 0, 0],  # x0 + x1 == 2: same row, conflicting right-hand side
    ])

    EQ_B = torch.DoubleTensor([1, 2])

    OBJ_C = torch.DoubleTensor([2, 3, 1, 5])

    eq = LinearEqConstraints(EQ_A, EQ_B)
    le = LinearLeConstraints(LE_A, LE_B)
    obj = LinearObjective(OBJ_C)

    _vars = ["x_%s" % i for i in range(4)]
    return Problem(_vars, obj, le, eq)
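bad_eq_lp_problem is infeasible for a purely algebraic reason: the equality system repeats one row with two different right-hand sides. A plain-numpy rank test (Rouché–Capelli) makes the inconsistency explicit, independent of any solver:

import numpy as np

EQ_A = np.array([[1, 1, 0, 0],
                 [1, 1, 0, 0]])
EQ_B = np.array([1, 2])

# A x = b is solvable iff rank(A) == rank of the augmented matrix [A | b].
aug = np.column_stack([EQ_A, EQ_B])
print(np.linalg.matrix_rank(EQ_A), np.linalg.matrix_rank(aug))  # 1 2 -> inconsistent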
Example #5
def random_eq_qp_problem():
    import numpy as np
    import scipy.sparse as spa

    np.random.seed(42)
    n = 100

    m = int(n / 2)

    # Generate problem data
    P = spa.random(n, n, density=0.15, data_rvs=np.random.randn, format='csc')
    P = P.dot(P.T).tocsc() + 1e-02 * spa.eye(n)
    q = np.random.randn(n)
    A = spa.random(m, n, density=0.15, data_rvs=np.random.randn, format='csc')
    x_sol = np.random.randn(n)  # Create fictitious solution
    l = A @ x_sol   # fictitious solution makes the system feasible
    u = np.copy(l)  # equal bounds l == u encode the equality A x = u

    _vars = ["x_%s" % i for i in range(n)]

    p = Problem.from_numpy(_vars, (0.5 * P.todense(), q, None), None,
                           (A.todense(), u), torch.device("cpu"),
                           torch.float64)
    return p
Example #6
def easy_qp_problem():
    LE_A = -1 * torch.DoubleTensor([[1, 1, 0, 0], [0, 0, 1, 1]])
    LE_B = -1 * torch.DoubleTensor([1, 1])

    # EQ_A = -1 * torch.DoubleTensor([[0,0,0,0],
    #                            [0,0,1,1]])
    # EQ_B = -1 * torch.DoubleTensor([1,1])

    OBJ_A = torch.DoubleTensor(np.diag([1, 1, 1, 1]))
    OBJ_B = torch.DoubleTensor([1, 1, 1, 1])

    eq = None
    # eq = LinearEqConstraints(EQ_A,EQ_B)
    le = LinearLeConstraints(LE_A, LE_B)
    obj = QuadraticObjective(OBJ_A, OBJ_B)

    _vars = ["x_%s" % i for i in range(4)]
    # sanity check: print the objective value at the all-ones point
    x = torch.ones(4).double()
    print("obj:", obj(x))

    return Problem(_vars, obj, le, eq)
Example #7
def random_benchmark_problem2():
    import numpy as np
    import scipy.sparse as spa
    n = 100
    m = 100
    seed = 42

    np.random.seed(seed)
    c = np.random.randn(n)
    A = spa.random(m, n, density=0.15, data_rvs=np.random.randn,
                   format='csc').todense()
    v = np.random.randn(n)  # Fictitious solution
    delta = np.abs(np.random.rand(m))  # To get inequality
    b = A @ v + delta

    _vars = ["x_%s" % i for i in range(n)]

    p = Problem.from_numpy(_vars,
                           obj=(None, c, None),
                           le=(-A, -b),
                           device=torch.device("cpu"))
    return p
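The "fictitious solution" trick used here (and in the QP generators in this file) guarantees feasibility by construction: since delta >= 0 elementwise, the point v satisfies A v = b - delta <= b. A standalone sketch of the idea:

import numpy as np
import scipy.sparse as spa

np.random.seed(42)
m, n = 100, 100
A = np.asarray(spa.random(m, n, density=0.15,
                          data_rvs=np.random.randn, format='csc').todense())
v = np.random.randn(n)             # fictitious solution
delta = np.abs(np.random.rand(m))  # nonnegative slack
b = A @ v + delta

assert np.all(A @ v <= b)  # v is feasible, so the LP cannot be infeasible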
Example #8
def random_le_qp_problem():
    import numpy as np
    import scipy.sparse as spa

    np.random.seed(42)

    n = 100
    m = int(n * 2)

    P = spa.random(n, n, density=0.15, data_rvs=np.random.randn, format='csc')
    P = P.dot(P.T).tocsc() + 1e-02 * spa.eye(n)
    q = np.random.randn(n)
    A = spa.random(m, n, density=0.15, data_rvs=np.random.randn, format='csc')
    v = np.random.randn(n)  # Fictitious solution
    delta = np.abs(np.random.rand(m))  # To get inequality
    u = A @ v + delta
    # one-sided constraint: the lower bound is implicitly -inf
    _vars = ["x_%s" % i for i in range(n)]

    p = Problem.from_numpy(_vars, (0.5 * P.todense(), q, None),
                           (A.todense(), u), None, torch.device("cpu"),
                           torch.float64)
    return p
Example #9
def benchmark_problem():
    c, G, h, A, b = read_mps_preprocess("25fv47")  # netlib LP benchmark 25FV47
    _vars = ["x_%s" % i for i in range(c.size)]

    return Problem.from_numpy(_vars, (None, c, None), (G, h), (A, b))
Example #10
def jet20_default_backend_func(problem,
                               x=None,
                               opt_tolerance=1e-3,
                               opt_u=10.0,
                               opt_alpha=0.1,
                               opt_beta=0.5,
                               opt_constraint_tolerance=1e-5,
                               opt_verbose=False,
                               rouding_precision=3,
                               force_rouding=False,
                               device="cuda"):
    """
    This function is a wrapper of jet20 backend.

    :param problem: the problem instance.
    :type problem: class:`jet20.Problem`.
    :param x: initial solution of the problem
    :type x: list,numpy.ndarray
    :param opt_u: hyperparameters for interior point method
    :type opt_u: float
    :param opt_alpha: hyperparameters for line search
    :type opt_alpha: float
    :param opt_beta: hyperparameters for line search
    :type opt_beta: float
    :param opt_tolerance: objective value tolerance
    :type opt_tolerance: float
    :param opt_constraint_tolerance: feasibility tolerance
    :type opt_constraint_tolerance: float
    :param rouding_precision: rouding precision
    :type rouding_precision: int
    :param force_rouding: whether force rounding
    :type rouding_precision: bool
    :return: solution of the problem
    :rtype: Solution
    """

    eps = np.finfo(np.float64).eps
    config = Config(opt_tolerance=opt_tolerance,
                    opt_u=opt_u,
                    opt_alpha=opt_alpha,
                    opt_beta=opt_beta,
                    opt_constraint_tolerance=opt_constraint_tolerance,
                    opt_verbose=opt_verbose,
                    rouding_precision=rouding_precision,
                    force_rouding=force_rouding,
                    device=device)

    s = Solver()
    eqf = EnsureEqFeasible()
    lef = EnsureLeFeasible()
    r = Rounding()
    s.register_pres(eqf, lef)      # pre-solve feasibility hooks
    s.register_posts(r, eqf, lef)  # post-solve: rounding, then feasibility checks

    var_names = [v.name for v in problem._variables]
    _obj, _constraints, _ops, _consts = problem.canonical

    m, n = _obj.shape
    assert m == n

    # the canonical objective is square over homogeneous coordinates [x; 1]:
    # the top-left block is quadratic, the last row/column carry the linear
    # terms, and the corner entry is the constant offset
    obj_Q = _obj[:n - 1, :n - 1]
    obj_b = _obj[n - 1, :n - 1] + _obj[:n - 1, n - 1]
    obj_c = _obj[n - 1, n - 1]
    obj = (obj_Q, obj_b, obj_c)

    assert _constraints.shape[0] == _ops.shape[0] == _consts.shape[0]

    eq_A = _constraints[_ops == OP_EQUAL]
    eq_b = _consts[_ops == OP_EQUAL]
    if eq_A.size > 0:
        eq = (eq_A, eq_b)
    else:
        eq = None

    _le_A = _constraints[_ops == OP_LE]
    _le_b = _consts[_ops == OP_LE]

    # strict "<" rows are converted to "<=" by tightening the bound by eps
    lt_A = _constraints[_ops == OP_LT]
    lt_b = _consts[_ops == OP_LT] - eps

    le_A = np.concatenate([_le_A, lt_A])
    le_b = np.concatenate([_le_b, lt_b])

    if le_A.size > 0:
        le = (le_A, le_b)
    else:
        le = None

    p = P.from_numpy(var_names,
                     obj,
                     le,
                     eq,
                     dtype=torch.float64,
                     device=config.device)
    if x is not None:
        x = torch.tensor(x, dtype=torch.float64, device=config.device)

    return s.solve(p, config, x)
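Finally, a hypothetical end-to-end call, reusing easy_lp_problem() from Example #1; the exact fields of the returned Solution object depend on the jet20 API, so this is a sketch rather than a reference:

# hypothetical usage -- assumes the definitions above are importable
problem = easy_lp_problem()
solution = jet20_default_backend_func(problem,
                                      opt_tolerance=1e-6,
                                      opt_verbose=True,
                                      device="cpu")
print(solution)  # inspect the returned Solution object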