Example #1
    def test_isin(self):

        bv = self.bv
        test_bv = BlockVector(2)
        a = np.array([1.1, 3.3])
        b = np.array([5.5, 7.7])
        test_bv.set_block(0, a)
        test_bv.set_block(1, b)

        res = pn.isin(bv, test_bv)
        for bid, blk in enumerate(bv):
            self.assertEqual(blk.size, res.get_block(bid).size)
            res_flat = np.isin(blk, test_bv.get_block(bid))
            self.assertTrue(np.allclose(res.get_block(bid), res_flat))

        c = np.concatenate([a, b])
        res = pn.isin(bv, c)
        for bid, blk in enumerate(bv):
            self.assertEqual(blk.size, res.get_block(bid).size)
            res_flat = np.isin(blk, c)
            self.assertTrue(np.allclose(res.get_block(bid), res_flat))

        res = pn.isin(bv, test_bv, invert=True)
        for bid, blk in enumerate(bv):
            self.assertEqual(blk.size, res.get_block(bid).size)
            res_flat = np.isin(blk, test_bv.get_block(bid), invert=True)
            self.assertTrue(np.allclose(res.get_block(bid), res_flat))

        c = np.concatenate([a, b])
        res = pn.isin(bv, c, invert=True)
        for bid, blk in enumerate(bv):
            self.assertEqual(blk.size, res.get_block(bid).size)
            res_flat = np.isin(blk, c, invert=True)
            self.assertTrue(np.allclose(res.get_block(bid), res_flat))
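
The test above exercises PyNumero's block-aware isin. A minimal sketch of the kind of fixture it assumes follows; the import path and the pn alias are assumptions (the listing does not show them), with pn taken to be PyNumero's NumPy-compatible function layer.

import numpy as np
from pyomo.contrib.pynumero.sparse import BlockVector

bv = BlockVector(2)
bv.set_block(0, np.array([1.1, 2.2, 3.3]))
bv.set_block(1, np.array([4.4, 5.5, 6.6, 7.7]))
# pn.isin(bv, other) is expected to behave like np.isin applied block by block,
# returning a boolean BlockVector with the same block structure as bv.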
Example #2
    def test_get_block_vector_for_dot_product_5(self):
        rank = comm.Get_rank()

        rank_ownership = np.array([[1, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]])
        m = MPIBlockMatrix(4, 3, rank_ownership, comm)
        sub_m = np.array([[1, 0], [0, 1]])
        sub_m = coo_matrix(sub_m)
        if rank == 0:
            m.set_block(3, rank, sub_m.copy())
        elif rank == 1:
            m.set_block(0, 0, sub_m.copy())
            m.set_block(rank, rank, sub_m.copy())
            m.set_block(3, rank, sub_m.copy())
        else:
            m.set_block(rank, rank, sub_m.copy())
            m.set_block(3, rank, sub_m.copy())

        v = BlockVector(3)
        sub_v = np.ones(2)
        for ndx in range(3):
            v.set_block(ndx, sub_v.copy())

        res = m._get_block_vector_for_dot_product(v)

        self.assertIs(res, v)

        v_flat = v.flatten()
        res = m._get_block_vector_for_dot_product(v_flat)
        self.assertIsInstance(res, BlockVector)
        for ndx in range(3):
            block = res.get_block(ndx)
            self.assertTrue(np.array_equal(block, sub_v))
Example #3
    def do_back_solve(self, rhs):
        """
        Parameters
        ----------
        rhs: BlockVector

        Returns
        -------
        result: BlockVector
        """
        schur_complement_rhs = rhs.get_block(self.block_dim - 1)
        for ndx in range(self.block_dim - 1):
            A = self.block_matrix.get_block(self.block_dim - 1, ndx)
            contribution = self.subproblem_solvers[ndx].do_back_solve(
                rhs.get_block(ndx))
            schur_complement_rhs -= A.tocsr().dot(contribution)

        result = BlockVector(self.block_dim)
        coupling = self.schur_complement_solver.do_back_solve(
            schur_complement_rhs)
        result.set_block(self.block_dim - 1, coupling)

        for ndx in range(self.block_dim - 1):
            A = self.block_matrix.get_block(self.block_dim - 1, ndx)
            result.set_block(
                ndx, self.subproblem_solvers[ndx].do_back_solve(
                    rhs.get_block(ndx) -
                    A.tocsr().transpose().dot(coupling.flatten())))

        return result
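
do_back_solve above is the back-solve half of a Schur-complement factorization: it eliminates each diagonal block to form the Schur-complement right-hand side, solves for the coupling variables, and then back-substitutes into every block. The dense NumPy sketch below walks through the same algebra on a tiny symmetric system; the matrices and names are illustrative only and are not parapint's API.

import numpy as np

# block system [[A0, 0, B0.T], [0, A1, B1.T], [B0, B1, D]] [x0; x1; xc] = [r0; r1; rc]
A0 = np.array([[2.0, 0.0], [0.0, 3.0]])
A1 = np.array([[4.0]])
B0 = np.array([[1.0, 1.0]])
B1 = np.array([[2.0]])
D = np.array([[1.0]])
r0, r1, rc = np.array([1.0, 2.0]), np.array([3.0]), np.array([4.0])

# Schur complement (formed during numeric factorization): S = D - sum_k B_k inv(A_k) B_k.T
S = D - B0 @ np.linalg.solve(A0, B0.T) - B1 @ np.linalg.solve(A1, B1.T)

# back solve, mirroring the method above
sc_rhs = rc - B0 @ np.linalg.solve(A0, r0) - B1 @ np.linalg.solve(A1, r1)
xc = np.linalg.solve(S, sc_rhs)              # coupling block
x0 = np.linalg.solve(A0, r0 - B0.T @ xc)     # back-substitution per block
x1 = np.linalg.solve(A1, r1 - B1.T @ xc)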
Example #4
    def test_dot(self):

        mat1 = self.square_mpi_mat
        mat2 = self.square_mpi_mat2

        serial_mat1 = self.square_serial_mat
        serial_mat2 = self.square_serial_mat2

        rank = comm.Get_rank()

        bv1 = MPIBlockVector(2, [0, 1], comm)

        if rank == 0:
            bv1.set_block(0, np.arange(4, dtype=np.float64))
        if rank == 1:
            bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        bv1.broadcast_block_sizes()

        serial_bv1 = BlockVector(2)
        serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
        serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)

        res = mat1.dot(bv1)
        serial_res = serial_mat1.dot(serial_bv1)
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))
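
The MPI examples in this listing all follow the same ownership pattern: each block of an MPIBlockVector lives only on its owning rank, set_block is called only on that rank, and broadcast_block_sizes() lets every rank learn the global layout (a rank owner of -1, used in later examples, means the block is replicated on every rank). A minimal sketch, assuming mpi4py's COMM_WORLD and that MPIBlockVector is importable from the module path shown, run with two processes:

from mpi4py import MPI
import numpy as np
from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

v = MPIBlockVector(2, rank_owner=[0, 1], mpi_comm=comm)   # block 0 on rank 0, block 1 on rank 1
if rank == 0:
    v.set_block(0, np.arange(3, dtype=np.float64))
if rank == 1:
    v.set_block(1, np.arange(3, dtype=np.float64) + 3)
v.broadcast_block_sizes()      # share block sizes so the global size and offsets are known
local = v.make_local_copy()    # serial BlockVector holding every block, available on all ranks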
Example #5
def compute_init_lam(nlp, x=None, lam_max=1e3):
    if x is None:
        x = nlp.init_primals()
    else:
        assert x.size == nlp.n_primals()
    nlp.set_primals(x)

    assert nlp.n_ineq_constraints() == 0, \
        "only supported for equality constrained nlps for now"

    nx = nlp.n_primals()
    nc = nlp.n_constraints()

    # create Jacobian
    jac = nlp.evaluate_jacobian()

    # create gradient of objective
    df = nlp.evaluate_grad_objective()

    # create KKT system
    kkt = BlockMatrix(2, 2)
    kkt.set_block(0, 0, identity(nx))
    kkt.set_block(1, 0, jac)
    kkt.set_block(0, 1, jac.transpose())

    zeros = np.zeros(nc)
    rhs = BlockVector(2)
    rhs.set_block(0, -df)
    rhs.set_block(1, zeros)

    flat_kkt = kkt.tocoo().tocsc()
    flat_rhs = rhs.flatten()

    sol = spsolve(flat_kkt, flat_rhs)
    return sol[nlp.n_primals():nlp.n_primals() + nlp.n_constraints()]
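
compute_init_lam assembles and solves the stationarity system [[I, J.T], [J, 0]] [d; lam] = [-grad f; 0] and returns the lam component, i.e. a least-squares estimate of the equality-constraint multipliers at x; the (1, 1) block is simply left unset, which BlockMatrix treats as zero. The imports it relies on are not shown in the listing; the usual candidates would be:

import numpy as np
from scipy.sparse import identity
from scipy.sparse.linalg import spsolve
from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector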
Example #6
 def create_blocks(self, m: np.ndarray, x: np.ndarray):
     m = coo_matrix(m)
     r = m * x
     bm = BlockMatrix(2, 2)
     bm.set_block(0, 0, m.copy())
     bm.set_block(1, 1, m.copy())
     br = BlockVector(2)
     br.set_block(0, r.copy())
     br.set_block(1, r.copy())
     bx = BlockVector(2)
     bx.set_block(0, x.copy())
     bx.set_block(1, x.copy())
     return bm, bx, br
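
A self-contained usage check for the helper above; the imports and the small test data are assumptions added for the sketch, and the body restates create_blocks as a free function (dropping self) so it can run on its own.

import numpy as np
from scipy.sparse import coo_matrix
from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector

def create_blocks(m, x):
    m = coo_matrix(m)
    r = m * x
    bm = BlockMatrix(2, 2)
    bm.set_block(0, 0, m.copy())
    bm.set_block(1, 1, m.copy())
    br = BlockVector(2)
    br.set_block(0, r.copy())
    br.set_block(1, r.copy())
    bx = BlockVector(2)
    bx.set_block(0, x.copy())
    bx.set_block(1, x.copy())
    return bm, bx, br

bm, bx, br = create_blocks(np.array([[1.0, 2.0], [3.0, 4.0]]), np.array([1.0, 1.0]))
assert np.allclose((bm * bx).flatten(), br.flatten())   # the block product reproduces br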
Example #7
    def test_multiply(self):

        # check scalar multiplication
        block = self.block_m
        m = self.basic_m * 5.0
        scipy_mat = bmat([[block, block], [None, block]], format='coo')
        mulscipy_mat = scipy_mat * 5.0
        dinopy_mat = m.tocoo()
        drow = np.sort(dinopy_mat.row)
        dcol = np.sort(dinopy_mat.col)
        ddata = np.sort(dinopy_mat.data)
        srow = np.sort(mulscipy_mat.row)
        scol = np.sort(mulscipy_mat.col)
        sdata = np.sort(mulscipy_mat.data)
        self.assertListEqual(drow.tolist(), srow.tolist())
        self.assertListEqual(dcol.tolist(), scol.tolist())
        self.assertListEqual(ddata.tolist(), sdata.tolist())

        m = 5.0 * self.basic_m
        dinopy_mat = m.tocoo()
        drow = np.sort(dinopy_mat.row)
        dcol = np.sort(dinopy_mat.col)
        ddata = np.sort(dinopy_mat.data)
        self.assertListEqual(drow.tolist(), srow.tolist())
        self.assertListEqual(dcol.tolist(), scol.tolist())
        self.assertListEqual(ddata.tolist(), sdata.tolist())

        # check dot product with block vector
        block = self.block_m
        m = self.basic_m
        scipy_mat = bmat([[block, block], [None, block]], format='coo')
        x = BlockVector(2)
        x.set_block(0, np.ones(block.shape[1], dtype=np.float64))
        x.set_block(1, np.ones(block.shape[1], dtype=np.float64))

        res_scipy = scipy_mat.dot(x.flatten())
        res_dinopy = m * x
        res_dinopy_flat = m * x.flatten()

        self.assertListEqual(res_dinopy.tolist(), res_scipy.tolist())
        self.assertListEqual(res_dinopy_flat.tolist(), res_scipy.tolist())

        dense_mat = dinopy_mat.toarray()
        self.basic_m *= 5.0
        self.assertTrue(np.allclose(dense_mat, self.basic_m.toarray()))
Example #8
    def test_schur_complement(self):
        A = BlockMatrix(4, 4)
        A.set_block(0, 0, coo_matrix(np.array([[1, 1],
                                               [0, 1]], dtype=np.double)))
        A.set_block(1, 1, coo_matrix(np.array([[1, 0],
                                               [0, 1]], dtype=np.double)))
        A.set_block(2, 2, coo_matrix(np.array([[1, 0],
                                               [1, 1]], dtype=np.double)))
        A.set_block(3, 3, coo_matrix(np.array([[0, 0],
                                               [0, 0]], dtype=np.double)))
        A.set_block(3, 0, coo_matrix(np.array([[0, -1],
                                               [0, 0]], dtype=np.double)))
        A.set_block(3, 1, coo_matrix(np.array([[-1, 0],
                                               [0, -1]], dtype=np.double)))
        A.set_block(3, 2, coo_matrix(np.array([[0, 0],
                                               [-1, 0]], dtype=np.double)))
        A_upper = A.copy_structure()
        A_upper.set_block(0, 3, A.get_block(3, 0).transpose(copy=True))
        A_upper.set_block(1, 3, A.get_block(3, 1).transpose(copy=True))
        A_upper.set_block(2, 3, A.get_block(3, 2).transpose(copy=True))

        rhs = BlockVector(4)
        rhs.set_block(0, np.array([1, 0], dtype=np.double))
        rhs.set_block(1, np.array([0, 0], dtype=np.double))
        rhs.set_block(2, np.array([0, 1], dtype=np.double))
        rhs.set_block(3, np.array([1, 1], dtype=np.double))

        x1 = np.linalg.solve((A + A_upper).toarray(), rhs.flatten())

        sc_solver = parapint.linalg.SchurComplementLinearSolver(
            subproblem_solvers={ndx: ScipyInterface(compute_inertia=True) for ndx in range(3)},
            schur_complement_solver=ScipyInterface(compute_inertia=True))
        sc_solver.do_symbolic_factorization(A)
        sc_solver.do_numeric_factorization(A)
        x2 = sc_solver.do_back_solve(rhs)

        inertia1 = sc_solver.get_inertia()
        eig = np.linalg.eigvals((A + A_upper).toarray())
        pos = np.count_nonzero(eig > 0)
        neg = np.count_nonzero(eig < 0)
        zero = np.count_nonzero(eig == 0)
        inertia2 = (pos, neg, zero)
        self.assertTrue(np.allclose(x1, x2.flatten()))
        self.assertEqual(inertia1, inertia2)
Example #9
    def test_dot(self):
        A_dense = self.basic_m.toarray()
        A_block = self.basic_m
        x = np.ones(A_dense.shape[1])
        block_x = BlockVector(2)
        block_x.set_block(0, np.ones(self.block_m.shape[1]))
        block_x.set_block(1, np.ones(self.block_m.shape[1]))
        flat_res = A_block.dot(x).flatten()
        block_res = A_block.dot(block_x)
        self.assertTrue(np.allclose(A_dense.dot(x), flat_res))
        self.assertTrue(np.allclose(A_dense.dot(x), block_res.flatten()))
        self.assertEqual(block_res.bshape[0], 2)

        m = BlockMatrix(2, 2)
        sub_m = np.array([[1, 0], [0, 1]])
        sub_m = coo_matrix(sub_m)
        m.set_block(0, 1, sub_m.copy())
        m.set_block(1, 0, sub_m.copy())
        x = np.arange(4)
        res = m * x
        self.assertTrue(np.allclose(res.flatten(), np.array([2, 3, 0, 1])))
Example #10
 def evaluate_eq_constraints(self) -> BlockVector:
     """
     Returns
     -------
     eq_resid: BlockVector
         The residuals of the equality constraints, including the coupling constraints.
         This BlockVector has one block for every time block. Each block is itself a BlockVector with
         3 blocks. The first block contains the residuals of the equality constraints in the corresponding time
         block. The second block has the residuals for the coupling constraints linking the states at the
         beginning of the time block to the coupling variables between the time block and the previous
         time block. The third block has the residuals for the coupling constraints linking the states at the
         end of the time block to the coupling variables between the time block and the next time block.
     """
     for ndx, nlp in self._nlps.items():
         sub_block = BlockVector(3)
         sub_block.set_block(0, nlp.evaluate_eq_constraints())
         sub_block.set_block(
             1, (self._link_backward_matrices[ndx] * nlp.get_primals() -
                 (self._link_backward_coupling_matrices[ndx] *
                  self._primals.get_block(self._num_time_blocks))))
         sub_block.set_block(
             2, (self._link_forward_matrices[ndx] * nlp.get_primals() -
                 (self._link_forward_coupling_matrices[ndx] *
                  self._primals.get_block(self._num_time_blocks))))
         self._eq_resid.set_block(ndx, sub_block)
     return self._eq_resid
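
The docstring above fixes a three-block layout for each time block. A hedged sketch of how a caller might unpack the result (interface stands for an instance of the class this method belongs to; the name is hypothetical):

resid = interface.evaluate_eq_constraints()
for ndx in range(resid.nblocks):
    blk = resid.get_block(ndx)          # itself a BlockVector with 3 blocks
    eq_resid = blk.get_block(0)         # equality constraints of time block ndx
    backward_link = blk.get_block(1)    # coupling to the previous time block's coupling variables
    forward_link = blk.get_block(2)     # coupling to the next time block's coupling variables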
Example #11
    def test_mumps_linear_solver(self):
        A = np.array([[ 1,  7,  3],
                      [ 7,  4, -5],
                      [ 3, -5,  6]], dtype=np.double)
        A = coo_matrix(A)
        A_lower = tril(A)
        x1 = np.arange(3) + 1
        b1 = A * x1
        x2 = np.array(list(reversed(x1)))
        b2 = A * x2

        solver = MumpsCentralizedAssembledLinearSolver()
        solver.do_symbolic_factorization(A)
        solver.do_numeric_factorization(A)
        x = solver.do_back_solve(b1)
        self.assertTrue(np.allclose(x, x1))
        x = solver.do_back_solve(b2)
        self.assertTrue(np.allclose(x, x2))

        solver = MumpsCentralizedAssembledLinearSolver(sym=2)
        x = solver.solve(A_lower, b1)
        self.assertTrue(np.allclose(x, x1))

        block_A = BlockMatrix(2, 2)
        block_A.set_row_size(0, 2)
        block_A.set_row_size(1, 1)
        block_A.set_col_size(0, 2)
        block_A.set_col_size(1, 1)
        block_A.copyfrom(A)

        block_b1 = BlockVector(2)
        block_b1.set_block(0, b1[0:2])
        block_b1.set_block(1, b1[2:])
        
        block_b2 = BlockVector(2)
        block_b2.set_block(0, b2[0:2])
        block_b2.set_block(1, b2[2:])

        solver = MumpsCentralizedAssembledLinearSolver(icntl_options={10: -3}, cntl_options={2: 1e-16})
        solver.do_symbolic_factorization(block_A)
        solver.do_numeric_factorization(block_A)
        x = solver.do_back_solve(block_b1)
        self.assertTrue(np.allclose(x, x1))
        x = solver.do_back_solve(block_b2)
        self.assertTrue(np.allclose(x, x2))
        self.assertEqual(solver.get_infog(15), 3)
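
The imports this MUMPS test relies on are not shown in the listing; the usual candidates are given below, with the wrapper's module path being an assumption. If the MUMPS options are read correctly, icntl_options={10: -3} requests a fixed three steps of iterative refinement and cntl_options={2: 1e-16} tightens the refinement tolerance, which is why the test expects INFOG(15), the number of refinement steps actually performed, to be 3.

import numpy as np
from scipy.sparse import coo_matrix, tril
from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector
from pyomo.contrib.pynumero.linalg.mumps_interface import MumpsCentralizedAssembledLinearSolver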
Example #12
File: interface.py  Project: vova292/pyomo
    def evaluate_primal_dual_kkt_rhs(self, timer=None):
        if timer is None:
            timer = HierarchicalTimer()
        timer.start('eval grad obj')
        grad_obj = self.get_obj_factor() * self.evaluate_grad_objective()
        timer.stop('eval grad obj')
        timer.start('eval jac')
        jac_eq = self._nlp.evaluate_jacobian_eq()
        jac_ineq = self._nlp.evaluate_jacobian_ineq()
        timer.stop('eval jac')
        timer.start('eval cons')
        eq_resid = self._nlp.evaluate_eq_constraints()
        ineq_resid = self._nlp.evaluate_ineq_constraints() - self._slacks
        timer.stop('eval cons')

        timer.start('grad_lag_primals')
        grad_lag_primals = (
            grad_obj + jac_eq.transpose() * self._nlp.get_duals_eq() +
            jac_ineq.transpose() * self._nlp.get_duals_ineq() - self._barrier /
            (self._nlp.get_primals() - self._nlp.primals_lb()) +
            self._barrier / (self._nlp.primals_ub() - self._nlp.get_primals()))
        timer.stop('grad_lag_primals')

        timer.start('grad_lag_slacks')
        grad_lag_slacks = (-self._nlp.get_duals_ineq() - self._barrier /
                           (self._slacks - self._nlp.ineq_lb()) +
                           self._barrier /
                           (self._nlp.ineq_ub() - self._slacks))
        timer.stop('grad_lag_slacks')

        rhs = BlockVector(4)
        rhs.set_block(0, grad_lag_primals)
        rhs.set_block(1, grad_lag_slacks)
        rhs.set_block(2, eq_resid)
        rhs.set_block(3, ineq_resid)
        rhs = -rhs
        return rhs
Example #13
    def test_mpi_schur_complement(self):
        rank_by_index = list()
        for ndx in range(3):
            for _rank in range(size):
                if (ndx - _rank) % size == 0:
                    rank_by_index.append(_rank)
        rank_by_index.append(-1)

        A = MPIBlockMatrix(nbrows=4,
                           nbcols=4,
                           rank_ownership=[
                               rank_by_index, rank_by_index, rank_by_index,
                               rank_by_index
                           ],
                           mpi_comm=comm)
        if rank_by_index[0] == rank:
            A.set_block(
                0, 0, coo_matrix(np.array([[1, 1], [0, 1]], dtype=np.double)))
        if rank_by_index[1] == rank:
            A.set_block(
                1, 1, coo_matrix(np.array([[1, 0], [0, 1]], dtype=np.double)))
        if rank_by_index[2] == rank:
            A.set_block(
                2, 2, coo_matrix(np.array([[1, 0], [1, 1]], dtype=np.double)))
        A.set_block(3, 3,
                    coo_matrix(np.array([[0, 0], [0, 1]], dtype=np.double)))
        if rank_by_index[0] == rank:
            A.set_block(
                3, 0, coo_matrix(np.array([[0, -1], [0, 0]], dtype=np.double)))
        if rank_by_index[1] == rank:
            A.set_block(
                3, 1, coo_matrix(np.array([[-1, 0], [0, -1]],
                                          dtype=np.double)))
        if rank_by_index[2] == rank:
            A.set_block(
                3, 2, coo_matrix(np.array([[0, 0], [-1, 0]], dtype=np.double)))
        A.broadcast_block_sizes()

        local_A = BlockMatrix(4, 4)
        local_A.set_block(
            0, 0, coo_matrix(np.array([[1, 1], [0, 1]], dtype=np.double)))
        local_A.set_block(
            1, 1, coo_matrix(np.array([[1, 0], [0, 1]], dtype=np.double)))
        local_A.set_block(
            2, 2, coo_matrix(np.array([[1, 0], [1, 1]], dtype=np.double)))
        local_A.set_block(
            3, 3, coo_matrix(np.array([[0, 0], [0, 1]], dtype=np.double)))
        local_A.set_block(
            3, 0, coo_matrix(np.array([[0, -1], [0, 0]], dtype=np.double)))
        local_A.set_block(
            3, 1, coo_matrix(np.array([[-1, 0], [0, -1]], dtype=np.double)))
        local_A.set_block(
            3, 2, coo_matrix(np.array([[0, 0], [-1, 0]], dtype=np.double)))
        local_A.set_block(0, 3, local_A.get_block(3, 0).transpose(copy=True))
        local_A.set_block(1, 3, local_A.get_block(3, 1).transpose(copy=True))
        local_A.set_block(2, 3, local_A.get_block(3, 2).transpose(copy=True))

        rhs = MPIBlockVector(nblocks=4,
                             rank_owner=rank_by_index,
                             mpi_comm=comm)
        if rank_by_index[0] == rank:
            rhs.set_block(0, np.array([1, 0], dtype=np.double))
        if rank_by_index[1] == rank:
            rhs.set_block(1, np.array([0, 0], dtype=np.double))
        if rank_by_index[2] == rank:
            rhs.set_block(2, np.array([0, 1], dtype=np.double))
        rhs.set_block(3, np.array([1, 1], dtype=np.double))
        rhs.broadcast_block_sizes()

        local_rhs = BlockVector(4)
        local_rhs.set_block(0, np.array([1, 0], dtype=np.double))
        local_rhs.set_block(1, np.array([0, 0], dtype=np.double))
        local_rhs.set_block(2, np.array([0, 1], dtype=np.double))
        local_rhs.set_block(3, np.array([1, 1], dtype=np.double))

        x1 = np.linalg.solve(local_A.toarray(), local_rhs.flatten())

        solver_class = parapint.linalg.MPISchurComplementLinearSolver
        sc_solver = solver_class(
            subproblem_solvers={
                ndx: ScipyInterface(compute_inertia=True)
                for ndx in range(3)
            },
            schur_complement_solver=ScipyInterface(compute_inertia=True))
        sc_solver.do_symbolic_factorization(A)
        sc_solver.do_numeric_factorization(A)
        x2 = sc_solver.do_back_solve(rhs)

        self.assertTrue(np.allclose(x1, x2.make_local_copy().flatten()))

        inertia1 = sc_solver.get_inertia()
        eig = np.linalg.eigvals(local_A.toarray())
        pos = np.count_nonzero(eig > 0)
        neg = np.count_nonzero(eig < 0)
        zero = np.count_nonzero(eig == 0)
        inertia2 = (pos, neg, zero)
        self.assertEqual(inertia1, inertia2)

        sc_solver.do_numeric_factorization(A)
        x2 = sc_solver.do_back_solve(rhs)
        self.assertTrue(np.allclose(x1, x2.make_local_copy().flatten()))
Example #14
    def _create_vectors(self):

        # Note: This method requires the number of complicating variables (nz) to be defined beforehand

        # init values and lower and upper bounds
        self._init_x = BlockVector(len(self._nlps) + 1)
        self._init_y = BlockVector(len(self._nlps) + self.nblocks)
        self._lower_x = BlockVector(len(self._nlps) + 1)
        self._upper_x = BlockVector(len(self._nlps) + 1)
        self._lower_g = BlockVector(len(self._nlps) + self.nblocks)
        self._upper_g = BlockVector(len(self._nlps) + self.nblocks)
        ndx = 0
        for nlp in self._nlps:
            self._init_x.set_block(ndx, nlp.x_init())
            self._init_y.set_block(ndx, nlp.y_init())
            self._lower_x.set_block(ndx, nlp.xl())
            self._upper_x.set_block(ndx, nlp.xu())
            self._lower_g.set_block(ndx, nlp.gl())
            self._upper_g.set_block(ndx, nlp.gu())
            ndx += 1
        self._init_x.set_block(ndx, np.zeros(self.nz, dtype=np.double))
        self._lower_x.set_block(ndx, np.full(self.nz, -np.inf,
                                             dtype=np.double))
        self._upper_x.set_block(ndx, np.full(self.nz, np.inf, dtype=np.double))
        for i in range(self.nblocks):
            self._init_y.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            self._lower_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            self._upper_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            ndx += 1

        # define x maps and masks
        self._lower_x_mask = np.isfinite(self._lower_x)
        self._lower_x_map = self._lower_x_mask.nonzero()[0]
        self._upper_x_mask = np.isfinite(self._upper_x)
        self._upper_x_map = self._upper_x_mask.nonzero()[0]

        # define gcd maps and masks
        bounds_difference = self._upper_g - self._lower_g
        abs_bounds_difference = np.absolute(bounds_difference)
        tolerance_equalities = 1e-8
        self._c_mask = abs_bounds_difference < tolerance_equalities
        self._c_map = self._c_mask.nonzero()[0]
        self._d_mask = abs_bounds_difference >= tolerance_equalities
        self._d_map = self._d_mask.nonzero()[0]

        self._lower_g_mask = np.isfinite(
            self._lower_g) * self._d_mask + self._c_mask
        self._lower_g_map = self._lower_g_mask.nonzero()[0]
        self._upper_g_mask = np.isfinite(
            self._upper_g) * self._d_mask + self._c_mask
        self._upper_g_map = self._upper_g_mask.nonzero()[0]

        self._lower_d_mask = pn.isin(self._d_map, self._lower_g_map)
        self._upper_d_mask = pn.isin(self._d_map, self._upper_g_map)

        # remove empty vectors at the end of lower and upper d
        _lower_d_mask = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _lower_d_mask.set_block(i, self._lower_d_mask.get_block(i))
        self._lower_d_mask = _lower_d_mask

        _upper_d_mask = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _upper_d_mask.set_block(i, self._upper_d_mask.get_block(i))
        self._upper_d_mask = _upper_d_mask

        # define lower and upper d maps
        self._lower_d_map = pn.where(self._lower_d_mask)[0]
        self._upper_d_map = pn.where(self._upper_d_mask)[0]

        # get lower and upper d values
        self._lower_d = np.compress(self._d_mask, self._lower_g)
        self._upper_d = np.compress(self._d_mask, self._upper_g)

        # remove empty vectors at the end of lower and upper d
        _lower_d = BlockVector(self.nblocks)
        _upper_d = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _lower_d.set_block(i, self._lower_d.get_block(i))
            _upper_d.set_block(i, self._upper_d.get_block(i))
        self._lower_d = _lower_d
        self._upper_d = _upper_d
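
The mask/map pairs built above follow a common NumPy idiom: the mask is a boolean vector marking the entries with a finite bound, and the map holds their indices. A tiny NumPy-only illustration:

import numpy as np

lower_x = np.array([-np.inf, 0.0, -1.0, -np.inf])
lower_x_mask = np.isfinite(lower_x)        # array([False, True, True, False])
lower_x_map = lower_x_mask.nonzero()[0]    # array([1, 2]): indices of variables with a lower bound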
Example #15
    def test_mul(self):

        mat1 = self.square_mpi_mat
        mat2 = self.square_mpi_mat2

        serial_mat1 = self.square_serial_mat
        serial_mat2 = self.square_serial_mat2

        rank = comm.Get_rank()

        bv1 = MPIBlockVector(2, [0, 1], comm)

        if rank == 0:
            bv1.set_block(0, np.arange(4, dtype=np.float64))
        if rank == 1:
            bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        bv1.broadcast_block_sizes()

        serial_bv1 = BlockVector(2)
        serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
        serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)

        res = mat1 * bv1
        serial_res = serial_mat1 * serial_bv1
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        res = mat2 * bv1
        serial_res = serial_mat2 * serial_bv1
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        bv1 = MPIBlockVector(2, [0, -1], comm)

        if rank == 0:
            bv1.set_block(0, np.arange(4, dtype=np.float64))
        bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        bv1.broadcast_block_sizes()

        res = mat1 * bv1
        serial_res = serial_mat1 * serial_bv1
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        res = mat2 * bv1
        serial_res = serial_mat2 * serial_bv1
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        # rectangular matrix
        mat1 = self.rectangular_mpi_mat
        serial_mat1 = self.rectangular_serial_mat

        bv1 = MPIBlockVector(3, [0, 1, 2], comm)

        if rank == 0:
            bv1.set_block(0, np.arange(4, dtype=np.float64))
        if rank == 1:
            bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        if rank == 2:
            bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)

        bv1.broadcast_block_sizes()

        serial_bv1 = BlockVector(3)
        serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
        serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        serial_bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)

        # with warnings.catch_warnings():
        #     warnings.simplefilter("ignore")
        res = mat1 * bv1
        serial_res = serial_mat1 * serial_bv1

        self.assertIsInstance(res, BlockVector)
        self.assertEqual(serial_res.nblocks, 2)
        self.assertEqual(res.nblocks, 2)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        bv1 = MPIBlockVector(3, [0, 1, 0], comm)

        if rank == 0:
            bv1.set_block(0, np.arange(4, dtype=np.float64))
            bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)
        if rank == 1:
            bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
        bv1.broadcast_block_sizes()

        res = mat1 * bv1
        serial_res = serial_mat1 * serial_bv1
        self.assertIsInstance(res, BlockVector)
        self.assertEqual(res.nblocks, serial_res.nblocks)
        for bid in range(serial_res.nblocks):
            self.assertTrue(
                np.allclose(res.get_block(bid), serial_res.get_block(bid)))

        res = mat1 * 3.0
        serial_res = serial_mat1 * 3.0
        self.assertIsInstance(res, MPIBlockMatrix)
        rows, columns = np.nonzero(res.ownership_mask)
        for i, j in zip(rows, columns):
            if res.get_block(i, j) is not None:
                self.assertTrue(
                    np.allclose(
                        res.get_block(i, j).toarray(),
                        serial_res.get_block(i, j).toarray()))
            else:
                self.assertIsNone(serial_res.get_block(i, j))

        res = 3.0 * mat1
        serial_res = serial_mat1 * 3.0

        self.assertIsInstance(res, MPIBlockMatrix)
        rows, columns = np.nonzero(res.ownership_mask)
        for i, j in zip(rows, columns):
            if res.get_block(i, j) is not None:
                self.assertTrue(
                    np.allclose(
                        res.get_block(i, j).toarray(),
                        serial_res.get_block(i, j).toarray()))
            else:
                self.assertIsNone(serial_res.get_block(i, j))
Example #16
class TwoStageStochasticNLP(NLP):
    """
    Nonlinear program interface for the composite NLP that results from
    two-stage stochastic programming problems
    """
    def __init__(self, nlps, complicating_vars):
        """

        Parameters
        ----------
        nlps: dictionary mapping scenario names to NLP objects
        complicating_vars: dictionary mapping scenario names to lists of
        complicating variable indices

        """
        if not isinstance(nlps, dict):
            raise RuntimeError("Model must be a dictionary")
        if not isinstance(complicating_vars, dict):
            raise RuntimeError("complicating_vars must be a dictionary")
        if len(complicating_vars) != len(nlps):
            raise RuntimeError(
                "Each scenario must have a list of complicated variables")

        # call parent class to set model
        super(TwoStageStochasticNLP, self).__init__(None)

        # initialize components
        self._initialize_nlp_components(nlps, complicating_vars)
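
    # A hedged construction sketch for this class; nlp_s1 and nlp_s2 stand for
    # hypothetical per-scenario NLP objects built elsewhere:
    #
    #     nlps = {'s1': nlp_s1, 's2': nlp_s2}               # scenario name -> NLP
    #     complicating_vars = {'s1': [0, 3], 's2': [0, 3]}  # scenario name -> indices of complicating variables
    #     two_stage = TwoStageStochasticNLP(nlps, complicating_vars)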

    def _initialize_nlp_components(self, *args, **kwargs):

        nlps = args[0]
        complicating_vars = args[1]

        aux_counter = 0
        n_z = 0

        # check inputs
        for k, l in complicating_vars.items():
            if k not in nlps:
                raise RuntimeError("{} not a scenario name".format(k))
            if aux_counter == 0:
                n_z = len(l)
            else:
                if len(l) != n_z:
                    err_msg = "All scenarios must have the same number of complicated variables"
                    raise RuntimeError(err_msg)
            for val in l:
                nlp = nlps[k]
                if val > nlp.nx:
                    raise RuntimeError(
                        "Variable index cannot be greater than number of vars in NLP"
                    )
            aux_counter += 1

        # map of scenario name to indices
        self._sname_to_sid = dict()
        # map of scenario id to scenario name
        self._sid_to_sname = list()

        # populate containers
        ordered_keys = sorted(nlps.keys())
        new_dict = OrderedDict()
        self._nlps = list()
        for k in ordered_keys:
            nlp = nlps[k]
            if not isinstance(nlp, NLP):
                raise RuntimeError("Scenarios must be NLP objects")
            self._sname_to_sid[k] = len(self._sid_to_sname)
            self._sid_to_sname.append(k)
            self._nlps.append(nlp)
            # make model a dictionary of original models (PyomoModels or nl-files)
            new_dict[k] = nlp.model
        self._model = new_dict

        # set number of complicated variables
        self._nz = n_z

        # define map of complicated variables
        # this maps [scenario_id][zid] -> vid
        self._zid_to_vid = list()
        for sid, sname in enumerate(self._sid_to_sname):
            self._zid_to_vid.append(complicating_vars[sname])

        # defines vectors
        self._create_vectors()

        # define sizes
        self._nx = self._init_x.size  # this includes nz
        self._ng = self._init_y.size
        self._nc = sum(nlp.nc for nlp in self._nlps) + self.nz * self.nblocks
        self._nd = sum(nlp.nd for nlp in self._nlps)

        # define structure of jacobians
        self._create_jacobian_structures()

        # define structure Hessian
        self._create_hessian_structure()

        # cache coupling matrices
        self._AB_csr = self.coupling_matrix()
        self._AB_coo = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
        nb = self.nblocks
        for i in range(nb):
            self._AB_coo.set_block(i, i, self._AB_csr.get_block(i, i).tocoo())
        self._AB_coo.set_block(nb, nb, self._AB_csr.get_block(nb, nb))

    def _make_unmutable_caches(self):
        # no need for caches here
        pass

    def _create_vectors(self):

        # Note: This method requires the number of complicating variables (nz) to be defined beforehand

        # init values and lower and upper bounds
        self._init_x = BlockVector(len(self._nlps) + 1)
        self._init_y = BlockVector(len(self._nlps) + self.nblocks)
        self._lower_x = BlockVector(len(self._nlps) + 1)
        self._upper_x = BlockVector(len(self._nlps) + 1)
        self._lower_g = BlockVector(len(self._nlps) + self.nblocks)
        self._upper_g = BlockVector(len(self._nlps) + self.nblocks)
        ndx = 0
        for nlp in self._nlps:
            self._init_x.set_block(ndx, nlp.x_init())
            self._init_y.set_block(ndx, nlp.y_init())
            self._lower_x.set_block(ndx, nlp.xl())
            self._upper_x.set_block(ndx, nlp.xu())
            self._lower_g.set_block(ndx, nlp.gl())
            self._upper_g.set_block(ndx, nlp.gu())
            ndx += 1
        self._init_x.set_block(ndx, np.zeros(self.nz, dtype=np.double))
        self._lower_x.set_block(ndx, np.full(self.nz, -np.inf,
                                             dtype=np.double))
        self._upper_x.set_block(ndx, np.full(self.nz, np.inf, dtype=np.double))
        for i in range(self.nblocks):
            self._init_y.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            self._lower_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            self._upper_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
            ndx += 1

        # define x maps and masks
        self._lower_x_mask = np.isfinite(self._lower_x)
        self._lower_x_map = self._lower_x_mask.nonzero()[0]
        self._upper_x_mask = np.isfinite(self._upper_x)
        self._upper_x_map = self._upper_x_mask.nonzero()[0]

        # define gcd maps and masks
        bounds_difference = self._upper_g - self._lower_g
        abs_bounds_difference = np.absolute(bounds_difference)
        tolerance_equalities = 1e-8
        self._c_mask = abs_bounds_difference < tolerance_equalities
        self._c_map = self._c_mask.nonzero()[0]
        self._d_mask = abs_bounds_difference >= tolerance_equalities
        self._d_map = self._d_mask.nonzero()[0]

        self._lower_g_mask = np.isfinite(
            self._lower_g) * self._d_mask + self._c_mask
        self._lower_g_map = self._lower_g_mask.nonzero()[0]
        self._upper_g_mask = np.isfinite(
            self._upper_g) * self._d_mask + self._c_mask
        self._upper_g_map = self._upper_g_mask.nonzero()[0]

        self._lower_d_mask = pn.isin(self._d_map, self._lower_g_map)
        self._upper_d_mask = pn.isin(self._d_map, self._upper_g_map)

        # remove empty vectors at the end of lower and upper d
        _lower_d_mask = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _lower_d_mask.set_block(i, self._lower_d_mask.get_block(i))
        self._lower_d_mask = _lower_d_mask

        _upper_d_mask = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _upper_d_mask.set_block(i, self._upper_d_mask.get_block(i))
        self._upper_d_mask = _upper_d_mask

        # define lower and upper d maps
        self._lower_d_map = pn.where(self._lower_d_mask)[0]
        self._upper_d_map = pn.where(self._upper_d_mask)[0]

        # get lower and upper d values
        self._lower_d = np.compress(self._d_mask, self._lower_g)
        self._upper_d = np.compress(self._d_mask, self._upper_g)

        # remove empty vectors at the end of lower and upper d
        _lower_d = BlockVector(self.nblocks)
        _upper_d = BlockVector(self.nblocks)
        for i in range(self.nblocks):
            _lower_d.set_block(i, self._lower_d.get_block(i))
            _upper_d.set_block(i, self._upper_d.get_block(i))
        self._lower_d = _lower_d
        self._upper_d = _upper_d

    def _create_jacobian_structures(self):

        # Note: This method requires the complicated vars map to be
        # created beforehand

        # build general jacobian
        jac_g = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            xi = nlp.x_init()
            jac_g.set_block(sid, sid, nlp.jacobian_g(xi))

            # coupling matrices Ai
            scenario_vids = self._zid_to_vid[sid]
            col = np.array([vid for vid in scenario_vids])
            row = np.arange(0, self.nz)
            data = np.ones(self.nz, dtype=np.double)
            jac_g[sid + self.nblocks, sid] = coo_matrix(
                (data, (row, col)), shape=(self.nz, nlp.nx))

            # coupling matrices Bi
            jac_g[sid + self.nblocks, self.nblocks] = -identity(self.nz)

        self._internal_jacobian_g = jac_g
        flat_jac_g = jac_g.tocoo()
        self._irows_jac_g = flat_jac_g.row
        self._jcols_jac_g = flat_jac_g.col
        self._nnz_jac_g = flat_jac_g.nnz

        # build jacobian equality constraints
        jac_c = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            xi = nlp.x_init()
            jac_c.set_block(sid, sid, nlp.jacobian_c(xi))

            # coupling matrices Ai
            scenario_vids = self._zid_to_vid[sid]
            col = np.array([vid for vid in scenario_vids])
            row = np.arange(0, self.nz)
            data = np.ones(self.nz, dtype=np.double)
            jac_c[sid + self.nblocks, sid] = coo_matrix(
                (data, (row, col)), shape=(self.nz, nlp.nx))

            # coupling matrices Bi
            jac_c[sid + self.nblocks, self.nblocks] = -identity(self.nz)

        self._internal_jacobian_c = jac_c
        flat_jac_c = jac_c.tocoo()
        self._irows_jac_c = flat_jac_c.row
        self._jcols_jac_c = flat_jac_c.col
        self._nnz_jac_c = flat_jac_c.nnz

        # build jacobian inequality constraints
        jac_d = BlockMatrix(self.nblocks, self.nblocks)
        for sid, nlp in enumerate(self._nlps):
            xi = nlp.x_init()
            jac_d.set_block(sid, sid, nlp.jacobian_d(xi))
        self._internal_jacobian_d = jac_d
        flat_jac_d = jac_d.tocoo()
        self._irows_jac_d = flat_jac_d.row
        self._jcols_jac_d = flat_jac_d.col
        self._nnz_jac_d = flat_jac_d.nnz

        # ToDo: decide if we cache _irows and _jcols pointers for composite nlp

    def _create_hessian_structure(self):

        # Note: This method requires the complicated vars map to be
        # created beforehand

        hess_lag = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            xi = nlp.x_init()
            yi = nlp.y_init()
            hess_lag.set_block(sid, sid, nlp.hessian_lag(xi, yi))

        hess_lag[self.nblocks, self.nblocks] = coo_matrix((self.nz, self.nz))

        flat_hess = hess_lag.tocoo()
        self._irows_hess = flat_hess.row
        self._jcols_hess = flat_hess.col
        self._nnz_hess_lag = flat_hess.nnz

        # ToDo: decide if we cache _irows and _jcols pointers for composite nlp

    @property
    def nblocks(self):
        """
        Returns number of blocks (nlps)
        """
        return len(self._nlps)

    @property
    def nz(self):
        """
        Return number of complicated variables
        """
        return self._nz

    def nlps(self):
        """Creates generator scenario name to nlp """
        for sid, name in enumerate(self._sid_to_sname):
            yield name, self._nlps[sid]

    def create_vector_x(self, subset=None):
        """Returns ndarray of primal variables

        Parameters
        ----------
        subset : str, optional
            determines size of vector.
            `l`: only primal variables with lower bounds
            `u`: only primal variables with upper bounds

        Returns
        -------
        BlockVector

        """
        if subset is None:
            subvectors = [np.zeros(nlp.nx, dtype=np.double) for nlp in self._nlps] + \
                         [np.zeros(self.nz, dtype=np.double)]
            return BlockVector(subvectors)
        elif subset == 'l':
            vectors = list()
            for nlp in self._nlps:
                nx_l = len(nlp._lower_x_map)
                xl = np.zeros(nx_l, dtype=np.double)
                vectors.append(xl)
            # complicated variables have no lower bounds
            vectors.append(np.zeros(0, dtype=np.double))
            return BlockVector(vectors)
        elif subset == 'u':
            vectors = list()
            for nlp in self._nlps:
                nx_u = len(nlp._upper_x_map)
                xu = np.zeros(nx_u, dtype=np.double)
                vectors.append(xu)
            # complicated variables have no upper bounds
            vectors.append(np.zeros(0, dtype=np.double))
            return BlockVector(vectors)
        else:
            raise RuntimeError('Subset not recognized')

    def create_vector_y(self, subset=None):
        """Return ndarray of vector of constraints

        Parameters
        ----------
        subset : str, optional
            determines size of vector.
            `c`: only equality constraints
            `d`: only inequality constraints
            `dl`: only inequality constraints with lower bound
            `du`: only inequality constraints with upper bound

        Returns
        -------
        BlockVector

        """
        if subset is None:
            return BlockVector(
                [np.zeros(nlp.ng, dtype=np.double) for nlp in self._nlps] + [
                    np.zeros(self.nz, dtype=np.double)
                    for i in range(self.nblocks)
                ])
        elif subset == 'c':
            return BlockVector(
                [np.zeros(nlp.nc, dtype=np.double) for nlp in self._nlps] + [
                    np.zeros(self.nz, dtype=np.double)
                    for i in range(self.nblocks)
                ])
        elif subset == 'd':
            return BlockVector(
                [np.zeros(nlp.nd, dtype=np.double) for nlp in self._nlps])
        elif subset == 'dl' or subset == 'du':
            return BlockVector(
                [nlp.create_vector_y(subset=subset) for nlp in self._nlps])
        else:
            raise RuntimeError('Subset not recognized')

    def objective(self, x, **kwargs):
        """Returns value of objective function evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.

        Returns
        -------
        float

        """
        if isinstance(x, BlockVector):
            return sum(self._nlps[i].objective(x.get_block(i))
                       for i in range(self.nblocks))
        elif isinstance(x, np.ndarray):
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            return sum(self._nlps[i].objective(x_.get_block(i))
                       for i in range(self.nblocks))
        else:
            raise NotImplementedError(
                "x must be a numpy array or a BlockVector")

    def grad_objective(self, x, out=None, **kwargs):
        """Returns gradient of the objective function evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : array_like
            Output array. Its type is preserved and it
            must be of the right shape to hold the output.

        Returns
        -------
        array_like

        """
        if out is None:
            df = self.create_vector_x()
        else:
            assert isinstance(
                out,
                BlockVector), 'Composite NLP takes block vector to evaluate g'
            assert out.nblocks == self.nblocks + 1
            assert out.size == self.nx
            df = out

        if isinstance(x, BlockVector):
            assert x.size == self.nx
            assert x.nblocks == self.nblocks + 1
            for i in range(self.nblocks):
                self._nlps[i].grad_objective(x.get_block(i),
                                             out=df.get_block(i))
            return df
        elif isinstance(x, np.ndarray):
            assert x.size == self.nx
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            for i in range(self.nblocks):
                self._nlps[i].grad_objective(x_.get_block(i),
                                             out=df.get_block(i))
            return df
        else:
            raise NotImplementedError(
                "x must be a numpy array or a BlockVector")

    def evaluate_g(self, x, out=None, **kwargs):
        """Returns general inequality constraints evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : array_like
            Output array. Its type is preserved and it
            must be of the right shape to hold the output.

        Returns
        -------
        array_like

        """
        if out is None:
            res = self.create_vector_y()
        else:
            assert isinstance(
                out,
                BlockVector), 'Composite NLP takes block vector to evaluate g'
            assert out.nblocks == 2 * self.nblocks
            assert out.size == self.ng
            res = out

        if isinstance(x, BlockVector):
            assert x.size == self.nx
            assert x.nblocks == self.nblocks + 1
            for sid in range(self.nblocks):
                # evaluate gi
                self._nlps[sid].evaluate_g(x.get_block(sid),
                                           out=res.get_block(sid))

                # evaluate coupling Ax-z
                A = self._AB_csr.get_block(sid, sid)
                res[sid +
                    self.nblocks] = A * x.get_block(sid) - x[self.nblocks]
            return res
        elif isinstance(x, np.ndarray):
            assert x.size == self.nx
            block_x = self.create_vector_x()
            block_x.copyfrom(x)  # this is expensive
            x_ = block_x
            for sid in range(self.nblocks):
                self._nlps[sid].evaluate_g(x_.get_block(sid),
                                           out=res.get_block(sid))
                # evaluate coupling Ax-z
                A = self._AB_csr.get_block(sid, sid)
                res[sid +
                    self.nblocks] = A * x_.get_block(sid) - x_[self.nblocks]
            return res
        else:
            raise NotImplementedError(
                "x must be a numpy array or a BlockVector")

    def evaluate_c(self, x, out=None, **kwargs):
        """Returns the equality constraints evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : array_like
            Output array. Its type is preserved and it
            must be of the right shape to hold the output.

        Returns
        -------
        array_like

        """

        evaluated_g = kwargs.pop('evaluated_g', None)

        if out is None:
            res = self.create_vector_y(subset='c')
        else:
            assert isinstance(
                out,
                BlockVector), 'Composite NLP takes block vector to evaluate g'
            assert out.nblocks == 2 * self.nblocks
            assert out.size == self.nc
            res = out

        if evaluated_g is not None:
            assert isinstance(evaluated_g,
                              BlockVector), 'evaluated_g must be a BlockVector'
            assert evaluated_g.nblocks == 2 * self.nblocks
            assert evaluated_g.size == self.ng
            g = evaluated_g.compress(self._c_mask)
            if out is None:
                return g
            for bid, blk in enumerate(g):
                out.set_block(bid, blk)
            return out

        if isinstance(x, BlockVector):
            assert x.size == self.nx
            assert x.nblocks == self.nblocks + 1
            for sid in range(self.nblocks):
                self._nlps[sid].evaluate_c(x.get_block(sid),
                                           out=res.get_block(sid))
                A = self._AB_csr.get_block(sid, sid)
                res[sid +
                    self.nblocks] = A * x.get_block(sid) - x[self.nblocks]
            return res
        elif isinstance(x, np.ndarray):
            assert x.size == self.nx
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            for sid in range(self.nblocks):
                self._nlps[sid].evaluate_c(x_.get_block(sid),
                                           out=res.get_block(sid))
                A = self._AB_csr.get_block(sid, sid)
                res[sid +
                    self.nblocks] = A * x_.get_block(sid) - x_[self.nblocks]
            return res
        else:
            raise NotImplementedError(
                'x must be a numpy array or a BlockVector')

    def evaluate_d(self, x, out=None, **kwargs):
        """Returns the inequality constraints evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : array_like
            Output array. Its type is preserved and it
            must be of the right shape to hold the output.

        Returns
        -------
        array_like

        """
        evaluated_g = kwargs.pop('evaluated_g', None)

        if out is None:
            res = self.create_vector_y(subset='d')
        else:
            assert isinstance(
                out,
                BlockVector), 'Composite NLP takes block vector to evaluate g'
            assert out.nblocks == self.nblocks
            assert out.size == self.nd
            res = out

        if evaluated_g is not None:
            assert isinstance(evaluated_g,
                              BlockVector), 'evaluated_g must be a BlockVector'
            assert evaluated_g.nblocks == 2 * self.nblocks
            assert evaluated_g.size == self.ng
            d = evaluated_g.compress(self._d_mask)
            if out is None:
                return BlockVector(
                    [d.get_block(j) for j in range(self.nblocks)])
            for bid in range(self.nblocks):
                out.set_block(bid, d.get_block(bid))
            return out

        if isinstance(x, BlockVector):
            assert x.size == self.nx
            assert x.nblocks == self.nblocks + 1
            for sid in range(self.nblocks):
                self._nlps[sid].evaluate_d(x.get_block(sid),
                                           out=res.get_block(sid))
            return res
        elif isinstance(x, np.ndarray):
            assert x.size == self.nx
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            for sid in range(self.nblocks):
                self._nlps[sid].evaluate_d(x_.get_block(sid),
                                           out=res.get_block(sid))
            return res
        else:
            raise NotImplementedError(
                "x must be a numpy array or a BlockVector")

    def jacobian_g(self, x, out=None, **kwargs):
        """Returns the Jacobian of the general inequalities evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : BlockMatrix, optional
            Output matrix with the structure of the jacobian already defined.

        Returns
        -------
        BlockMatrix

        """
        assert x.size == self.nx, "Dimension mismatch"

        if isinstance(x, BlockVector):
            assert x.nblocks == self.nblocks + 1
            x_ = x
        elif isinstance(x, np.ndarray):
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
        else:
            raise RuntimeError("Input vector format not recognized")

        if out is None:
            jac_g = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                jac_g.set_block(sid, sid, nlp.jacobian_g(xi))
                # coupling matrices Ai
                jac_g[sid + self.nblocks,
                      sid] = self._AB_coo.get_block(sid, sid)
                # coupling matrices Bi
                jac_g[sid + self.nblocks, self.nblocks] = -identity(self.nz)
            return jac_g
        else:
            assert isinstance(out, BlockMatrix), 'out must be a BlockMatrix'
            assert out.bshape == (2 * self.nblocks,
                                  self.nblocks + 1), "Block shape mismatch"
            jac_g = out
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                nlp.jacobian_g(xi, out=jac_g.get_block(sid, sid))
                Ai = jac_g[sid + self.nblocks, sid]
                assert Ai.shape == self._AB_coo.get_block(sid, sid).shape, \
                    'Block {} mismatch shape'.format((sid + self.nblocks, sid))
                assert Ai.nnz == self._AB_coo.get_block(sid, sid).nnz, \
                    'Block {} mismatch nnz'.format((sid + self.nblocks, sid))
                Bi = jac_g[sid + self.nblocks, self.nblocks]
                assert Bi.shape == (self.nz, self.nz), \
                    'Block {} mismatch shape'.format((sid + self.nblocks, self.nblocks))
                assert Bi.nnz == self.nz, \
                    'Block {} mismatch nnz'.format((sid + self.nblocks, self.nblocks))
            return jac_g

    def jacobian_c(self, x, out=None, **kwargs):
        """Returns the Jacobian of the equalities evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : BlockMatrix, optional
            Output matrix with the structure of the jacobian already defined.

        Returns
        -------
        BlockMatrix

        """
        assert x.size == self.nx, 'Dimension mismatch'

        if isinstance(x, BlockVector):
            assert x.nblocks == self.nblocks + 1
            x_ = x
        elif isinstance(x, np.ndarray):
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
        else:
            raise RuntimeError('Input vector format not recognized')

        if out is None:
            jac_c = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                jac_c.set_block(sid, sid, nlp.jacobian_c(xi))
                # coupling matrices Ai
                jac_c[sid + self.nblocks,
                      sid] = self._AB_coo.get_block(sid, sid)
                # coupling matrices Bi
                jac_c[sid + self.nblocks, self.nblocks] = -identity(self.nz)
            return jac_c
        else:
            assert isinstance(out, BlockMatrix), 'out must be a BlockMatrix'
            assert out.bshape == (2 * self.nblocks,
                                  self.nblocks + 1), "Block shape mismatch"
            jac_c = out
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                nlp.jacobian_c(xi, out=jac_c.get_block(sid, sid))
                Ai = jac_c[sid + self.nblocks, sid]
                assert Ai.shape == self._AB_coo.get_block(sid, sid).shape, \
                    'Block {} mismatch shape'.format((sid + self.nblocks, sid))
                assert Ai.nnz == self._AB_coo.get_block(sid, sid).nnz, \
                    'Block {} mismatch nnz'.format((sid + self.nblocks, sid))
                Bi = jac_c[sid + self.nblocks, self.nblocks]
                assert Bi.shape == (self.nz, self.nz), \
                    'Block {} mismatch shape'.format((sid + self.nblocks, self.nblocks))
                assert Bi.nnz == self.nz, \
                    'Block {} mismatch nnz'.format((sid + self.nblocks, self.nblocks))
            return jac_c

    def jacobian_d(self, x, out=None, **kwargs):
        """Returns the Jacobian of the inequalities evaluated at x

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        out : BlockMatrix, optional
            Output matrix with the structure of the jacobian already defined.

        Returns
        -------
        BlockMatrix

        """
        assert x.size == self.nx, "Dimension mismatch"

        if isinstance(x, BlockVector):
            assert x.nblocks == self.nblocks + 1
            x_ = x
        elif isinstance(x, np.ndarray):
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
        else:
            raise RuntimeError('Input vector format not recognized')

        if out is None:
            jac_d = BlockMatrix(self.nblocks, self.nblocks)
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                jac_d.set_block(sid, sid, nlp.jacobian_d(xi))
            return jac_d
        else:
            assert isinstance(out, BlockMatrix), 'out must be a BlockMatrix'
            assert out.bshape == (self.nblocks,
                                  self.nblocks), 'Block shape mismatch'
            jac_d = out
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                nlp.jacobian_d(xi, out=jac_d.get_block(sid, sid))
            return jac_d

    def hessian_lag(self, x, y, out=None, **kwargs):
        """Return the Hessian of the Lagrangian function evaluated at x and y

        Parameters
        ----------
        x : array_like
            Array with values of primal variables.
        y : array_like
            Array with values of dual variables.
        out : BlockMatrix, optional
            Output matrix with the structure of the Hessian already defined.

        Returns
        -------
        BlockMatrix

        """
        assert x.size == self.nx, 'Dimension mismatch'
        assert y.size == self.ng, 'Dimension mismatch'

        eval_f_c = kwargs.pop('eval_f_c', True)

        if isinstance(x, BlockVector) and isinstance(y, BlockVector):
            assert x.nblocks == self.nblocks + 1
            assert y.nblocks == 2 * self.nblocks
            x_ = x
            y_ = y
        elif isinstance(x, np.ndarray) and isinstance(y, BlockVector):
            assert y.nblocks == 2 * self.nblocks
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            y_ = y
        elif isinstance(x, BlockVector) and isinstance(y, np.ndarray):
            assert x.nblocks == self.nblocks + 1
            x_ = x
            block_y = self.create_vector_y()
            block_y.copyfrom(y)
            y_ = block_y
        elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            block_x = self.create_vector_x()
            block_x.copyfrom(x)
            x_ = block_x
            block_y = self.create_vector_y()
            block_y.copyfrom(y)
            y_ = block_y
        else:
            raise NotImplementedError('Input vector format not recognized')

        if out is None:
            hess_lag = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                yi = y_.get_block(sid)
                hess_lag.set_block(sid, sid,
                                   nlp.hessian_lag(xi, yi, eval_f_c=eval_f_c))

            hess_lag[self.nblocks, self.nblocks] = coo_matrix(
                (self.nz, self.nz))
            return hess_lag
        else:
            assert isinstance(out, BlockMatrix), \
                'out must be a BlockMatrix'
            assert out.bshape == (self.nblocks + 1, self.nblocks + 1), \
                'Block shape mismatch'
            hess_lag = out
            for sid, nlp in enumerate(self._nlps):
                xi = x_.get_block(sid)
                yi = y_.get_block(sid)
                nlp.hessian_lag(xi,
                                yi,
                                out=hess_lag.get_block(sid, sid),
                                eval_f_c=eval_f_c)

            Hz = hess_lag[self.nblocks, self.nblocks]
            nb = self.nblocks
            assert Hz.shape == (self.nz, self.nz), \
                'out must have an {}x{} empty matrix in block {}'.format(
                    self.nz, self.nz, (nb, nb))
            assert Hz.nnz == 0, \
                'out must have an empty matrix in block {}'.format((nb, nb))
            return hess_lag

    def block_id(self, scenario_name):
        """
        Returns idx of corresponding nlp for scenario_name

        Parameters
        ----------
        scenario_name : str
            name of scenario

        Returns
        -------
        int

        """
        return self._sname_to_sid[scenario_name]

    def block_name(self, bid):
        """
        Returns scenario name for given bid index

        Parameters
        ----------
        bid : int
            index of a given scenario

        Returns
        -------
        str

        """
        return self._sid_to_sname[bid]

    def get_block(self, scenario_name):
        """
        Returns nlp corresponding to scenario_name

        Parameters
        ----------
        scenario_name : str
            name of scenario

        Returns
        -------
        NLP

        """
        bid = self._sname_to_sid[scenario_name]
        return self._nlps[bid]

    def complicated_vars_ids(self, scenario_name):
        return self._zid_to_vid[scenario_name]

    # ToDo: order of variables?
    # ToDo: order of constraints?

    def expansion_matrix_xl(self):

        Pxl = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            Pxl.set_block(sid, sid, nlp.expansion_matrix_xl())
        Pxl[self.nblocks, self.nblocks] = coo_matrix((self.nz, 0))
        return Pxl

    def expansion_matrix_xu(self):

        Pxu = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            Pxu.set_block(sid, sid, nlp.expansion_matrix_xu())
        Pxu[self.nblocks, self.nblocks] = coo_matrix((self.nz, 0))
        return Pxu

    def expansion_matrix_dl(self):

        Pdl = BlockMatrix(self.nblocks, self.nblocks)
        for sid, nlp in enumerate(self._nlps):
            Pdl.set_block(sid, sid, nlp.expansion_matrix_dl())
        return Pdl

    def expansion_matrix_du(self):

        Pdu = BlockMatrix(self.nblocks, self.nblocks)
        for sid, nlp in enumerate(self._nlps):
            Pdu.set_block(sid, sid, nlp.expansion_matrix_du())
        return Pdu

    def coupling_matrix(self):

        AB = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
        for sid, nlp in enumerate(self._nlps):
            col = self._zid_to_vid[sid]
            row = np.arange(self.nz, dtype=int)
            data = np.ones(self.nz)
            AB.set_block(
                sid, sid,
                csr_matrix((data, (row, col)), shape=(self.nz, nlp.nx)))
        AB[self.nblocks, self.nblocks] = -identity(self.nz)
        return AB

    def scenarios_order(self):
        return [self._sid_to_sname[i] for i in range(self.nblocks)]
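The jacobian_g, jacobian_c, jacobian_d and hessian_lag methods above all accept an optional out argument so that the block structure allocated on a first call can be refilled in place on later calls. The lines below are a minimal usage sketch rather than code from the original source; composite_nlp is an assumed name for an instance of the class above.

# Hypothetical usage sketch: build the block Jacobian once, then refill it.
x = composite_nlp.create_vector_x()     # BlockVector with nblocks + 1 blocks
jac = composite_nlp.jacobian_c(x)       # first call allocates a new BlockMatrix
# ... update the values in x between iterations ...
composite_nlp.jacobian_c(x, out=jac)    # later calls fill the existing blocks in place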
Example #17
    def _setup_kkt_and_rhs_structure(self):
        # First setup the diagonal blocks
        for ndx, nlp in self._nlps.items():
            sub_kkt = BlockMatrix(nbrows=2, nbcols=2)
            n = nlp.n_primals() + nlp.n_eq_constraints(
            ) + 2 * nlp.n_ineq_constraints()
            sub_kkt.set_row_size(0, n)
            sub_kkt.set_col_size(0, n)
            if ndx == 0:
                sub_kkt.set_row_size(1, 0)
                sub_kkt.set_col_size(1, 0)
            else:
                sub_kkt.set_row_size(1, self._num_states)
                sub_kkt.set_col_size(1, self._num_states)
            row_1 = BlockMatrix(nbrows=1, nbcols=4)
            if ndx == 0:
                row_1.set_row_size(0, 0)
            else:
                row_1.set_row_size(0, self._num_states)
            row_1.set_col_size(0, nlp.n_primals())
            row_1.set_col_size(1, nlp.n_ineq_constraints())
            row_1.set_col_size(2, nlp.n_eq_constraints())
            row_1.set_col_size(3, nlp.n_ineq_constraints())
            row_1.set_block(0, 0, self._link_backward_matrices[ndx])
            sub_kkt.set_block(1, 0, row_1)
            sub_kkt.set_block(0, 1, row_1.transpose())
            self._kkt.set_block(ndx, ndx, sub_kkt)
            sub_rhs = BlockVector(2)
            sub_rhs.set_block(0, np.zeros(n))
            if ndx == 0:
                sub_rhs.set_block(1, np.zeros(0))
            else:
                sub_rhs.set_block(1, np.zeros(self._num_states))
            self._rhs.set_block(ndx, sub_rhs)

        # Setup the border blocks
        for ndx, nlp in self._nlps.items():
            block = BlockMatrix(nbrows=2, nbcols=2)
            sub_block = BlockMatrix(nbrows=self._num_time_blocks, nbcols=4)
            sub_block.set_col_size(0, nlp.n_primals())
            sub_block.set_col_size(1, nlp.n_ineq_constraints())
            sub_block.set_col_size(2, nlp.n_eq_constraints())
            sub_block.set_col_size(3, nlp.n_ineq_constraints())
            for sub_ndx in range(self._num_time_blocks):
                if sub_ndx == self._num_time_blocks - 1:
                    sub_block.set_row_size(sub_ndx, 0)
                else:
                    sub_block.set_row_size(sub_ndx, self._num_states)
            sub_block.set_block(ndx, 0, self._link_forward_matrices[ndx])
            block.set_block(0, 0, sub_block)
            block.set_block(
                1, 1, -self._link_backward_coupling_matrices[ndx].transpose())
            self._kkt.set_block(self._num_time_blocks, ndx, block)
            self._kkt.set_block(ndx, self._num_time_blocks, block.transpose())

        # Setup the bottom right block
        block = BlockMatrix(2, 2)
        rhs_block = BlockVector(2)
        sub_block = BlockMatrix(1, self._num_time_blocks)
        sub_rhs_block = BlockVector(self._num_time_blocks)
        for ndx in range(self._num_time_blocks):
            sub_block.set_block(
                0, ndx, -self._link_forward_coupling_matrices[ndx].transpose())
            if ndx == self._num_time_blocks - 1:
                sub_rhs_block.set_block(ndx, np.zeros(0))
            else:
                sub_rhs_block.set_block(ndx, np.zeros(self._num_states))
        rhs_block.set_block(0, sub_rhs_block)
        rhs_block.set_block(1, np.zeros(self._total_num_coupling_vars))
        block.set_block(1, 0, sub_block)
        block.set_block(0, 1, sub_block.transpose())
        self._kkt.set_block(self._num_time_blocks, self._num_time_blocks,
                            block)
        self._rhs.set_block(self._num_time_blocks, rhs_block)
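_setup_kkt_and_rhs_structure above builds the KKT system as nested BlockMatrix and BlockVector objects whose row and column sizes are declared up front, so blocks that are never set still contribute the right dimensions. A standalone sketch of that pattern (illustrative only, with made-up sizes) is:

import numpy as np
from scipy.sparse import identity
from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector

# Declare block dimensions first; unset blocks behave as structurally zero.
kkt = BlockMatrix(nbrows=2, nbcols=2)
kkt.set_row_size(0, 3)
kkt.set_col_size(0, 3)
kkt.set_row_size(1, 2)
kkt.set_col_size(1, 2)
kkt.set_block(0, 0, identity(3))   # only block (0, 0) is actually stored

rhs = BlockVector(2)
rhs.set_block(0, np.zeros(3))
rhs.set_block(1, np.zeros(2))

print(kkt.shape, rhs.size)         # (5, 5) 5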
Example #18
    def _setup_block_vectors(self):
        for ndx, nlp in self._nlps.items():
            self._primals_lb.set_block(ndx, nlp.primals_lb())
            self._primals_ub.set_block(ndx, nlp.primals_ub())

            self._ineq_lb.set_block(ndx, nlp.ineq_lb())
            self._ineq_ub.set_block(ndx, nlp.ineq_ub())

            self._init_primals.set_block(ndx, nlp.init_primals())
            self._primals.set_block(ndx, nlp.init_primals().copy())
            self._delta_primals.set_block(ndx, np.zeros(nlp.n_primals()))

            self._init_slacks.set_block(ndx, nlp.init_slacks())
            self._slacks.set_block(ndx, nlp.init_slacks().copy())
            self._delta_slacks.set_block(ndx,
                                         np.zeros(nlp.n_ineq_constraints()))

            self._init_duals_ineq.set_block(ndx, nlp.init_duals_ineq())
            self._duals_ineq.set_block(ndx, nlp.init_duals_ineq().copy())
            self._delta_duals_ineq.set_block(
                ndx, np.zeros(nlp.n_ineq_constraints()))

            self._init_duals_primals_lb.set_block(ndx,
                                                  nlp.init_duals_primals_lb())
            self._duals_primals_lb.set_block(
                ndx,
                nlp.init_duals_primals_lb().copy())
            self._delta_duals_primals_lb.set_block(ndx,
                                                   np.zeros(nlp.n_primals()))

            self._init_duals_primals_ub.set_block(ndx,
                                                  nlp.init_duals_primals_ub())
            self._duals_primals_ub.set_block(
                ndx,
                nlp.init_duals_primals_ub().copy())
            self._delta_duals_primals_ub.set_block(ndx,
                                                   np.zeros(nlp.n_primals()))

            self._init_duals_slacks_lb.set_block(ndx,
                                                 nlp.init_duals_slacks_lb())
            self._duals_slacks_lb.set_block(ndx,
                                            nlp.init_duals_slacks_lb().copy())
            self._delta_duals_slacks_lb.set_block(
                ndx, np.zeros(nlp.n_ineq_constraints()))

            self._init_duals_slacks_ub.set_block(ndx,
                                                 nlp.init_duals_slacks_ub())
            self._duals_slacks_ub.set_block(ndx,
                                            nlp.init_duals_slacks_ub().copy())
            self._delta_duals_slacks_ub.set_block(
                ndx, np.zeros(nlp.n_ineq_constraints()))

            self._ineq_resid.set_block(ndx, np.zeros(nlp.n_ineq_constraints()))
            self._grad_objective.set_block(ndx, np.ones(nlp.n_primals()))

        # duals eq, eq resid
        for ndx, nlp in self._nlps.items():
            sub_block = BlockVector(3)
            sub_block.set_block(0, nlp.init_duals_eq())
            if ndx == 0:
                sub_block.set_block(1, np.zeros(0))
            else:
                sub_block.set_block(1, np.zeros(self._num_states))
            if ndx == self._num_time_blocks - 1:
                sub_block.set_block(2, np.zeros(0))
            else:
                sub_block.set_block(2, np.zeros(self._num_states))
            self._init_duals_eq.set_block(ndx, sub_block)
            self._duals_eq.set_block(ndx, sub_block.copy())
            self._delta_duals_eq.set_block(ndx, sub_block.copy_structure())
            self._eq_resid.set_block(ndx, sub_block.copy_structure() * 1)

        self._primals_lb.set_block(self._num_time_blocks,
                                   np.zeros(self._total_num_coupling_vars))
        self._primals_ub.set_block(self._num_time_blocks,
                                   np.zeros(self._total_num_coupling_vars))
        self._primals_lb.get_block(self._num_time_blocks).fill(-np.inf)
        self._primals_ub.get_block(self._num_time_blocks).fill(np.inf)

        self._init_primals.set_block(self._num_time_blocks,
                                     np.zeros(self._total_num_coupling_vars))
        self._primals.set_block(self._num_time_blocks,
                                np.zeros(self._total_num_coupling_vars))
        self._delta_primals.set_block(self._num_time_blocks,
                                      np.zeros(self._total_num_coupling_vars))

        self._init_duals_primals_lb.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))
        self._duals_primals_lb.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))
        self._delta_duals_primals_lb.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))

        self._init_duals_primals_ub.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))
        self._duals_primals_ub.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))
        self._delta_duals_primals_ub.set_block(
            self._num_time_blocks, np.zeros(self._total_num_coupling_vars))

        self._grad_objective.set_block(self._num_time_blocks,
                                       np.zeros(self._total_num_coupling_vars))
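_setup_block_vectors above fills every per-block vector block by block and leaves the coupling-variable block unbounded by filling its bound vectors with -inf and +inf. A small standalone sketch of the BlockVector operations it relies on (set_block, get_block(...).fill, flatten, copyfrom), with made-up sizes, is:

import numpy as np
from pyomo.contrib.pynumero.sparse import BlockVector

lb = BlockVector(2)
lb.set_block(0, np.zeros(3))
lb.set_block(1, np.zeros(2))
lb.get_block(1).fill(-np.inf)   # unbounded below, as done for the coupling block

flat = lb.flatten()             # flat numpy array holding all block entries
lb.copyfrom(flat)               # copy values from a flat array back into the blocks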
Example #19
    def setUpClass(cls) -> None:
        cls.t0 = 0
        cls.delta_t = 1
        cls.num_finite_elements = 6
        cls.constant_control_duration = 2
        cls.time_scale = 1
        cls.num_time_blocks = 3
        cls.with_bounds = True
        cls.with_ineq = False
        cls.barrier_parameter = 0.1
        cls.interface = Problem(t0=cls.t0,
                                delta_t=cls.delta_t,
                                num_finite_elements=cls.num_finite_elements,
                                constant_control_duration=cls.constant_control_duration,
                                time_scale=cls.time_scale,
                                num_time_blocks=cls.num_time_blocks,
                                with_bounds=cls.with_bounds,
                                with_ineq=cls.with_ineq)
        interface = cls.interface
        num_time_blocks = cls.num_time_blocks

        primals = BlockVector(num_time_blocks + 1)
        duals_eq = BlockVector(num_time_blocks)
        duals_ineq = BlockVector(num_time_blocks)
        duals_primals_lb = BlockVector(num_time_blocks + 1)
        duals_primals_ub = BlockVector(num_time_blocks + 1)
        duals_slacks_lb = BlockVector(num_time_blocks)
        duals_slacks_ub = BlockVector(num_time_blocks)

        val_map = pe.ComponentMap()
        m = interface.pyomo_model(0)
        val_map[m.x[0]] = 0
        val_map[m.x[1]] = 1
        val_map[m.x[2]] = 2
        val_map[m.p[0]] = 0.5
        val_map[m.cons[1]] = 1
        val_map[m.cons[2]] = 2

        m = interface.pyomo_model(1)
        val_map[m.x[2]] = 2
        val_map[m.x[3]] = 3
        val_map[m.x[4]] = 4
        val_map[m.p[2]] = 1
        val_map[m.cons[3]] = 3
        val_map[m.cons[4]] = 4

        m = interface.pyomo_model(2)
        val_map[m.x[4]] = 4
        val_map[m.x[5]] = 5
        val_map[m.x[6]] = 6
        val_map[m.p[4]] = 1.5
        val_map[m.cons[5]] = 5
        val_map[m.cons[6]] = 6

        for ndx in range(num_time_blocks):
            primals.set_block(ndx, np.array([val_map[i] for i in interface.get_pyomo_variables(ndx)], dtype=np.double))
            duals_primals_lb.set_block(ndx, np.zeros(4, dtype=np.double))
            duals_primals_ub.set_block(ndx, np.array([0, 0, 0, ndx], dtype=np.double))
            sub_duals_eq = BlockVector(3)
            sub_duals_eq.set_block(0, np.array([val_map[i] for i in interface.get_pyomo_constraints(ndx)], dtype=np.double))
            if ndx == 0:
                sub_duals_eq.set_block(1, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(1, np.ones(1, dtype=np.double) * ndx)
            if ndx == num_time_blocks - 1:
                sub_duals_eq.set_block(2, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(2, np.ones(1, dtype=np.double) * ndx)
            duals_eq.set_block(ndx, sub_duals_eq)
            duals_ineq.set_block(ndx, np.zeros(0, dtype=np.double))
            duals_slacks_lb.set_block(ndx, np.zeros(0, dtype=np.double))
            duals_slacks_ub.set_block(ndx, np.zeros(0, dtype=np.double))
        primals.set_block(num_time_blocks, np.array([3, 6], dtype=np.double))
        duals_primals_lb.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        duals_primals_ub.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        interface.set_primals(primals)
        interface.set_duals_eq(duals_eq)
        interface.set_duals_ineq(duals_ineq)
        interface.set_duals_slacks_lb(duals_slacks_lb)
        interface.set_duals_slacks_ub(duals_slacks_ub)
        interface.set_duals_primals_lb(duals_primals_lb)
        interface.set_duals_primals_ub(duals_primals_ub)
        interface.set_barrier_parameter(cls.barrier_parameter)
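The setUp above stores test values in a pe.ComponentMap keyed by Pyomo components and then orders them to match the variable and constraint ordering reported by the interface. A self-contained sketch of that pattern (the model and the ordering below are made up for illustration) is:

import numpy as np
import pyomo.environ as pe

m = pe.ConcreteModel()
m.x = pe.Var([0, 1, 2])

# Map each component to a value, independent of any particular ordering.
val_map = pe.ComponentMap()
for i in m.x:
    val_map[m.x[i]] = float(i)

# Hypothetical ordering standing in for interface.get_pyomo_variables(ndx).
ordered_vars = [m.x[2], m.x[0], m.x[1]]
vals = np.array([val_map[v] for v in ordered_vars], dtype=np.double)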
Example #20
    def setUpClass(cls) -> None:
        cls.t0 = 0
        cls.delta_t = 1
        cls.num_finite_elements = 6
        cls.constant_control_duration = 2
        cls.time_scale = 1
        cls.num_time_blocks = 3
        cls.with_bounds = True
        cls.with_ineq = False
        cls.barrier_parameter = 0.1
        cls.interface = Problem(t0=cls.t0,
                                delta_t=cls.delta_t,
                                num_finite_elements=cls.num_finite_elements,
                                constant_control_duration=cls.constant_control_duration,
                                time_scale=cls.time_scale,
                                num_time_blocks=cls.num_time_blocks,
                                with_bounds=cls.with_bounds,
                                with_ineq=cls.with_ineq)
        interface = cls.interface
        num_time_blocks = cls.num_time_blocks

        primals = BlockVector(num_time_blocks + 1)
        duals_eq = BlockVector(num_time_blocks)
        duals_ineq = BlockVector(num_time_blocks)
        duals_primals_lb = BlockVector(num_time_blocks + 1)
        duals_primals_ub = BlockVector(num_time_blocks + 1)
        duals_slacks_lb = BlockVector(num_time_blocks)
        duals_slacks_ub = BlockVector(num_time_blocks)

        ownership_map = _get_ownership_map(num_time_blocks, size)

        val_map = pe.ComponentMap()

        if ownership_map[0] == rank:
            m = interface.pyomo_model(0)
            val_map[m.x[0]] = 0
            val_map[m.x[1]] = 1
            val_map[m.x[2]] = 2
            val_map[m.p[0]] = 0.5
            val_map[m.cons[1]] = 1
            val_map[m.cons[2]] = 2

        if ownership_map[1] == rank:
            m = interface.pyomo_model(1)
            val_map[m.x[2]] = 2
            val_map[m.x[3]] = 3
            val_map[m.x[4]] = 4
            val_map[m.p[2]] = 1
            val_map[m.cons[3]] = 3
            val_map[m.cons[4]] = 4

        if ownership_map[2] == rank:
            m = interface.pyomo_model(2)
            val_map[m.x[4]] = 4
            val_map[m.x[5]] = 5
            val_map[m.x[6]] = 6
            val_map[m.p[4]] = 1.5
            val_map[m.cons[5]] = 5
            val_map[m.cons[6]] = 6

        for ndx in range(num_time_blocks):
            primals.set_block(ndx, np.zeros(4, dtype=np.double))
            duals_primals_lb.set_block(ndx, np.zeros(4, dtype=np.double))
            duals_primals_ub.set_block(ndx, np.zeros(4, dtype=np.double))
            duals_ineq.set_block(ndx, np.zeros(0, dtype=np.double))
            duals_slacks_lb.set_block(ndx, np.zeros(0, dtype=np.double))
            duals_slacks_ub.set_block(ndx, np.zeros(0, dtype=np.double))
            sub_duals_eq = BlockVector(3)
            sub_duals_eq.set_block(0, np.zeros(2, dtype=np.double))
            if ndx == 0:
                sub_duals_eq.set_block(1, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(1, np.zeros(1, dtype=np.double))
            if ndx == num_time_blocks - 1:
                sub_duals_eq.set_block(2, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(2, np.zeros(1, dtype=np.double))
            duals_eq.set_block(ndx, sub_duals_eq)
        primals.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        duals_primals_lb.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        duals_primals_ub.set_block(num_time_blocks, np.zeros(2, dtype=np.double))

        local_block_indices = _distribute_blocks(num_time_blocks, rank, size)
        for ndx in local_block_indices:
            primals.set_block(ndx, np.array([val_map[i] for i in interface.get_pyomo_variables(ndx)], dtype=np.double))
            duals_primals_ub.set_block(ndx, np.array([0, 0, 0, ndx], dtype=np.double))
            sub_duals_eq = duals_eq.get_block(ndx)
            sub_duals_eq.set_block(0, np.array([val_map[i] for i in interface.get_pyomo_constraints(ndx)], dtype=np.double))
            if ndx == 0:
                sub_duals_eq.set_block(1, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(1, np.ones(1, dtype=np.double) * ndx)
            if ndx == num_time_blocks - 1:
                sub_duals_eq.set_block(2, np.zeros(0, dtype=np.double))
            else:
                sub_duals_eq.set_block(2, np.ones(1, dtype=np.double) * ndx)

        primals_flat = primals.flatten()
        res = np.zeros(primals_flat.size, dtype=np.double)
        comm.Allreduce(primals_flat, res)
        primals.copyfrom(res)

        duals_primals_lb_flat = duals_primals_lb.flatten()
        res = np.zeros(duals_primals_lb_flat.size, dtype=np.double)
        comm.Allreduce(duals_primals_lb_flat, res)
        duals_primals_lb.copyfrom(res)

        duals_primals_ub_flat = duals_primals_ub.flatten()
        res = np.zeros(duals_primals_ub_flat.size, dtype=np.double)
        comm.Allreduce(duals_primals_ub_flat, res)
        duals_primals_ub.copyfrom(res)

        duals_eq_flat = duals_eq.flatten()
        res = np.zeros(duals_eq_flat.size, dtype=np.double)
        comm.Allreduce(duals_eq_flat, res)
        duals_eq.copyfrom(res)

        primals.set_block(num_time_blocks, np.array([3, 6], dtype=np.double))
        duals_primals_lb.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        duals_primals_ub.set_block(num_time_blocks, np.zeros(2, dtype=np.double))
        interface.set_primals(primals)
        interface.set_duals_eq(duals_eq)
        interface.set_duals_ineq(duals_ineq)
        interface.set_duals_slacks_lb(duals_slacks_lb)
        interface.set_duals_slacks_ub(duals_slacks_ub)
        interface.set_duals_primals_lb(duals_primals_lb)
        interface.set_duals_primals_ub(duals_primals_ub)
        interface.set_barrier_parameter(cls.barrier_parameter)
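In the MPI variant above, each rank fills only the blocks it owns and comm.Allreduce over the flattened vectors combines the locally stored values so that every rank ends up with the full data. A minimal standalone sketch of that pattern (assuming mpi4py is available; intended to be run with two MPI processes) is:

import numpy as np
from mpi4py import MPI
from pyomo.contrib.pynumero.sparse import BlockVector

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

v = BlockVector(2)
v.set_block(0, np.zeros(3, dtype=np.double))
v.set_block(1, np.zeros(3, dtype=np.double))
if rank == 0:
    v.set_block(0, np.arange(3, dtype=np.double))          # values owned by rank 0
elif rank == 1:
    v.set_block(1, np.arange(3, dtype=np.double) + 10.0)   # values owned by rank 1

flat = v.flatten()
combined = np.zeros(flat.size, dtype=np.double)
comm.Allreduce(flat, combined)   # element-wise sum; non-owning ranks contribute zeros
v.copyfrom(combined)             # every rank now holds both blocks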
Example #21
File: sqp.py  Project: michaelbynum/pyomo
def sqp(nlp: NLP,
        linear_solver: LinearSolverInterface,
        max_iter=100,
        tol=1e-8,
        output=True):
    """
    An example of a simple SQP algoritm for 
    equality-constrained NLPs.

    Parameters
    ----------
    nlp: NLP
        A PyNumero NLP
    max_iter: int
        The maximum number of iterations
    tol: float
        The convergence tolerance
    """
    t0 = time.time()

    # setup KKT matrix
    kkt = BlockMatrix(2, 2)
    rhs = BlockVector(2)

    # create and initialize the iteration vector
    z = BlockVector(2)
    z.set_block(0, nlp.get_primals())
    z.set_block(1, nlp.get_duals())

    if output:
        print(
            f"{'Iter':<12}{'Objective':<12}{'Primal Infeasibility':<25}{'Dual Infeasibility':<25}{'Elapsed Time':<15}"
        )

    # main iteration loop
    for _iter in range(max_iter):
        nlp.set_primals(z.get_block(0))
        nlp.set_duals(z.get_block(1))

        grad_lag = (nlp.evaluate_grad_objective() +
                    nlp.evaluate_jacobian_eq().transpose() * z.get_block(1))
        residuals = nlp.evaluate_eq_constraints()

        if output:
            print(
                f"{_iter:<12}{nlp.evaluate_objective():<12.2e}{np.abs(residuals).max():<25.2e}{np.abs(grad_lag).max():<25.2e}{time.time()-t0:<15.2e}"
            )

        if (np.abs(grad_lag).max() <= tol and np.abs(residuals).max() <= tol):
            break

        kkt.set_block(0, 0, nlp.evaluate_hessian_lag())
        kkt.set_block(1, 0, nlp.evaluate_jacobian_eq())
        kkt.set_block(0, 1, nlp.evaluate_jacobian_eq().transpose())

        rhs.set_block(0, grad_lag)
        rhs.set_block(1, residuals)

        delta, res = linear_solver.solve(kkt, -rhs)
        assert res.status == LinearSolverStatus.successful
        z += delta