Example #1
    @classmethod
    def setUpClass(cls):

        # test problem 1
        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        cls.model = create_basic_dense_qp(G, A, b, c)
        cls.pyomo_nlp = PyomoNLP(cls.model)
        cls.coupling_vars = [
            cls.pyomo_nlp.variable_idx(cls.model.x[0]),
            cls.pyomo_nlp.variable_idx(cls.model.x[2])
        ]
        cls.nlp = AdmmNLP(cls.pyomo_nlp, cls.coupling_vars, rho=2.0)

        # test problem 2
        cls.model2 = create_model2()
        cls.pyomo_nlp2 = PyomoNLP(cls.model2)
        cls.coupling_vars2 = [
            cls.pyomo_nlp2.variable_idx(cls.model2.x[1]),
            cls.pyomo_nlp2.variable_idx(cls.model2.x[3]),
            cls.pyomo_nlp2.variable_idx(cls.model2.x[5])
        ]
        cls.nlp2 = AdmmNLP(cls.pyomo_nlp2, cls.coupling_vars2, rho=1.0)

        # test problem 3
        cls.model3 = create_basic_model()
        cls.pyomo_nlp3 = PyomoNLP(cls.model3)
        cls.coupling_vars3 = [cls.pyomo_nlp3.variable_idx(cls.model3.x[1])]
        cls.nlp3 = AdmmNLP(cls.pyomo_nlp3, cls.coupling_vars3, rho=1.0)
Example #2
    @classmethod
    def setUpClass(cls):

        # Hessian
        cls.G = np.array([[36, 17, 19, 12, 8, 15], [17, 33, 18, 11, 7, 14],
                          [19, 18, 43, 13, 8, 16], [12, 11, 13, 18, 6, 11],
                          [8, 7, 8, 6, 9, 8], [15, 14, 16, 11, 8, 29]])

        # jacobian
        cls.A = np.array([[7, 1, 8, 3, 3, 3], [5, 0, 5, 1, 5, 8],
                          [2, 6, 7, 1, 1, 8], [1, 5, 0, 6, 1, 0]])

        cls.b = np.array([84, 62, 65, 1])
        cls.c = np.array([20, 15, 21, 18, 29, 24])

        cls.complicated_vars_ids = [4, 5]

        cls.scenarios = dict()
        cls.coupling_vars = dict()
        cls.n_scenarios = 3
        for i in range(cls.n_scenarios):
            instance = create_basic_dense_qp(cls.G, cls.A, cls.b, cls.c,
                                             cls.complicated_vars_ids)

            nlp = PyomoNLP(instance)
            scenario_name = "s{}".format(i)
            cls.scenarios[scenario_name] = nlp

            cvars = list()
            for k in cls.complicated_vars_ids:
                cvars.append(nlp.variable_idx(instance.z[k]))
            cls.coupling_vars[scenario_name] = cvars

        cls.nlp = TwoStageStochasticNLP(cls.scenarios, cls.coupling_vars)
Example #3
    def test_nnz_hessian_lag(self):
        self.assertEqual(self.nlp.nnz_hessian_lag, 9)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(
            m,
            complicating_vars=[m.x[1], m.x[3], m.x[5]],
            # z_estimates=[1, 2, 3],
            # w_estimates=[1, 2, 3],
            rho=1.0)
        nl = PyomoNLP(aug_model)
        self.assertEqual(self.nlp2.nnz_hessian_lag, nl.nnz_hessian_lag)
Example #4
    @classmethod
    def setUpClass(cls):

        # test problem 1
        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        cls.model = create_basic_dense_qp(G, A, b, c)
        cls.pyomo_nlp = PyomoNLP(cls.model)
        cls.coupling_vars = [
            cls.pyomo_nlp.variable_idx(cls.model.x[0]),
            cls.pyomo_nlp.variable_idx(cls.model.x[2])
        ]
        cls.nlp = AdmmNLP(cls.pyomo_nlp, cls.coupling_vars, rho=2.0)
Example #5
    def test_grad_objective(self):

        w_estimates = np.array([5.0, 5.0])
        z_estimates = np.array([2.0, 2.0])
        rho = 2.0
        nlp = AdmmNLP(self.pyomo_nlp,
                      self.coupling_vars,
                      rho=rho,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        hessian_base = self.model.hessian_f
        c = self.model.grad_f
        A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)
        x = nlp.create_vector_x()
        x.fill(1.0)

        df = hessian_base.dot(x) + c
        df += A.transpose().dot(w_estimates)
        df += rho * (A.transpose().dot(A).dot(x) - A.transpose().dot(z_estimates))

        self.assertTrue(np.allclose(df, nlp.grad_objective(x)))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=3.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1], m.x[3], m.x[5]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=3.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertTrue(np.allclose(nlp.grad_objective(x), nl.grad_objective(x)))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=8.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=8.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        x.fill(1.0)
        self.assertTrue(np.allclose(nlp.grad_objective(x), nl.grad_objective(x)))
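
For reference, the quantity assembled by hand above is the gradient of the ADMM augmented objective, with A acting as the 0/1 selector matrix for the coupling variables. Below is a minimal NumPy sketch on the data of test problem 1 (standalone and purely illustrative; the helper name admm_gradient is not part of the library):

import numpy as np

def admm_gradient(G, c, A, x, w, z, rho):
    # gradient of 0.5*x'Gx + c'x + w'(Ax - z) + (rho/2)*||Ax - z||^2
    return G.dot(x) + c + A.T.dot(w) + rho * A.T.dot(A.dot(x) - z)

# data from test problem 1 in setUpClass
G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]], dtype=np.double)
c = np.array([-8, -3, -3], dtype=np.double)
A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)  # selects x[0] and x[2]
x = np.ones(3)
w = np.array([5.0, 5.0])
z = np.array([2.0, 2.0])
print(admm_gradient(G, c, A, x, w, z, rho=2.0))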
Example #6
    def test_objective(self):

        w_estimates = np.array([5.0, 5.0])
        z_estimates = np.array([2.0, 2.0])
        rho = 2.0
        nlp = AdmmNLP(self.pyomo_nlp,
                      self.coupling_vars,
                      rho=rho,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        hessian_base = self.model.hessian_f
        c = self.model.grad_f
        A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)
        x = nlp.create_vector_x()
        x.fill(1.0)
        f = 0.5 * x.transpose().dot(hessian_base.dot(x)) + c.dot(x)
        difference = A.dot(x) - z_estimates
        f += w_estimates.dot(difference)
        f += 0.5 * rho * np.linalg.norm(difference)**2
        self.assertEqual(f, nlp.objective(x))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=5.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1], m.x[3], m.x[5]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=5.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertAlmostEqual(nlp.objective(x), nl.objective(x))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=7.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=7.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertAlmostEqual(nlp.objective(x), nl.objective(x))
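
The value being reconstructed here is the ADMM augmented objective, f(x) + w'(Ax - z) + (rho/2)*||Ax - z||^2. A short sketch on the same test problem 1 data (illustrative only) that also checks, via a central finite difference, that this objective is consistent with the gradient expression used in the previous example:

import numpy as np

def admm_objective(G, c, A, x, w, z, rho):
    # 0.5*x'Gx + c'x + w'(Ax - z) + (rho/2)*||Ax - z||^2
    r = A.dot(x) - z
    return 0.5 * x.dot(G.dot(x)) + c.dot(x) + w.dot(r) + 0.5 * rho * r.dot(r)

G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]], dtype=np.double)
c = np.array([-8, -3, -3], dtype=np.double)
A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)
w = np.array([5.0, 5.0])
z = np.array([2.0, 2.0])
rho = 2.0
x = np.ones(3)

# central finite difference of the objective should match the gradient
eps = 1e-6
fd = np.array([(admm_objective(G, c, A, x + eps * e, w, z, rho) -
                admm_objective(G, c, A, x - eps * e, w, z, rho)) / (2 * eps)
               for e in np.eye(3)])
assert np.allclose(fd, G.dot(x) + c + A.T.dot(w) + rho * A.T.dot(A.dot(x) - z))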
Example #7
    def test_hessian_lag(self):

        hessian_base = self.model.hessian_f
        ata = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.double)
        rho = self.nlp.rho
        admm_hessian = hessian_base + ata * rho
        x = self.nlp.create_vector_x()
        y = self.nlp.create_vector_y()
        hess_lag = self.nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        self.assertTrue(np.allclose(dense_hess_lag, admm_hessian))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=7.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1], m.x[3], m.x[5]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=7.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        y = nlp.create_vector_y()
        hess_lag = nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        hess_lagp = nl.hessian_lag(x, y)
        dense_hess_lagp = hess_lagp.todense()
        self.assertTrue(np.allclose(dense_hess_lag, dense_hess_lagp))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=1.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=1.0)
        nl = PyomoNLP(aug_model)
        x = nlp.create_vector_x()
        y = nlp.create_vector_y()
        hess_lag = nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        hess_lagp = nl.hessian_lag(x, y)
        dense_hess_lagp = hess_lagp.todense()

        self.assertTrue(np.allclose(dense_hess_lag, dense_hess_lagp))
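
Since the augmentation term is quadratic in x, the expected Lagrangian Hessian above is just the base Hessian G plus rho times A'A, and for coupling variables x[0] and x[2] the product A'A is exactly the hard-coded 0/1 diagonal ata. A two-line NumPy check (illustrative only):

import numpy as np

A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)  # selector for x[0], x[2]
ata = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.double)
assert np.allclose(A.T.dot(A), ata)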
Example #8
    def test_grad_objective(self):

        w_estimates = np.array([5.0, 5.0])
        z_estimates = np.array([2.0, 2.0])
        rho = 2.0
        nlp = AdmmNLP(self.pyomo_nlp,
                      self.coupling_vars,
                      rho=rho,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        hessian_base = self.model.hessian_f
        c = self.model.grad_f
        A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)
        x = nlp.create_vector_x()
        x.fill(1.0)

        df = hessian_base.dot(x) + c
        df += A.transpose().dot(w_estimates)
        df += rho * (A.transpose().dot(A).dot(x) -
                     A.transpose().dot(z_estimates))

        self.assertTrue(np.allclose(df, nlp.grad_objective(x)))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=3.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(
            m,
            complicating_vars=[m.x[1], m.x[3], m.x[5]],
            z_estimates=z_estimates,
            w_estimates=w_estimates,
            rho=3.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertTrue(
            np.allclose(nlp.grad_objective(x), nl.grad_objective(x)))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=8.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=8.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        x.fill(1.0)
        self.assertTrue(
            np.allclose(nlp.grad_objective(x), nl.grad_objective(x)))
Example #9
    def test_objective(self):

        w_estimates = np.array([5.0, 5.0])
        z_estimates = np.array([2.0, 2.0])
        rho = 2.0
        nlp = AdmmNLP(self.pyomo_nlp,
                      self.coupling_vars,
                      rho=rho,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        hessian_base = self.model.hessian_f
        c = self.model.grad_f
        A = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.double)
        x = nlp.create_vector_x()
        x.fill(1.0)
        f = 0.5 * x.transpose().dot(hessian_base.dot(x)) + c.dot(x)
        difference = A.dot(x) - z_estimates
        f += w_estimates.dot(difference)
        f += 0.5 * rho * np.linalg.norm(difference)**2
        self.assertEqual(f, nlp.objective(x))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=5.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(
            m,
            complicating_vars=[m.x[1], m.x[3], m.x[5]],
            z_estimates=z_estimates,
            w_estimates=w_estimates,
            rho=5.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertAlmostEqual(nlp.objective(x), nl.objective(x))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=7.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=7.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        self.assertAlmostEqual(nlp.objective(x), nl.objective(x))
Example #10
    def test_hessian_lag(self):

        hessian_base = self.model.hessian_f
        ata = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.double)
        rho = self.nlp.rho
        admm_hessian = hessian_base + ata * rho
        x = self.nlp.create_vector_x()
        y = self.nlp.create_vector_y()
        hess_lag = self.nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        self.assertTrue(np.allclose(dense_hess_lag, admm_hessian))

        # second nlp
        w_estimates = np.array([1.0, 2.0, 3.0])
        z_estimates = np.array([3.0, 4.0, 5.0])
        nlp = AdmmNLP(self.pyomo_nlp2,
                      self.coupling_vars2,
                      rho=7.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(
            m,
            complicating_vars=[m.x[1], m.x[3], m.x[5]],
            z_estimates=z_estimates,
            w_estimates=w_estimates,
            rho=7.0)
        nl = PyomoNLP(aug_model)

        x = nlp.create_vector_x()
        y = nlp.create_vector_y()
        hess_lag = nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        hess_lagp = nl.hessian_lag(x, y)
        dense_hess_lagp = hess_lagp.todense()
        self.assertTrue(np.allclose(dense_hess_lag, dense_hess_lagp))

        # third nlp
        w_estimates = np.array([1.0])
        z_estimates = np.array([3.0])
        nlp = AdmmNLP(self.pyomo_nlp3,
                      self.coupling_vars3,
                      rho=1.0,
                      w_estimates=w_estimates,
                      z_estimates=z_estimates)

        m = self.model3
        transform = AdmmModel()
        aug_model = transform.create_using(m,
                                           complicating_vars=[m.x[1]],
                                           z_estimates=z_estimates,
                                           w_estimates=w_estimates,
                                           rho=1.0)
        nl = PyomoNLP(aug_model)
        x = nlp.create_vector_x()
        y = nlp.create_vector_y()
        hess_lag = nlp.hessian_lag(x, y)
        dense_hess_lag = hess_lag.todense()
        hess_lagp = nl.hessian_lag(x, y)
        dense_hess_lagp = hess_lagp.todense()

        self.assertTrue(np.allclose(dense_hess_lag, dense_hess_lagp))
Example #11
def main():
    """
    Make the flowsheet object and solve
    """
    ss_flowsheet = ss_sim.main()

    flowsheet = Flowsheet(name='MB_Model')

    # fill in values of IC parameters from steady state solve
    setICs(flowsheet, ss_flowsheet)

    # Fix variables
    setInputs(flowsheet)

    # Initialize at steady state
    initialize_ss(flowsheet, ss_flowsheet)
    mb = flowsheet.MB_fuel

    write_differential_equations(flowsheet)

    # Then perturb
    solid_x_ptb = {'Fe2O3': 0.25, 'Fe3O4': 0.01, 'Al2O3': 0.74}
    gas_y_ptb = {'CO2': 0.03999, 'H2O': 0.00001, 'CH4': 0.96}
    #perturbInputs(flowsheet,0,Solid_M=691.4,Solid_T=1283,Solid_x=solid_x_ptb,
    #        Gas_F=150,Gas_T=350,Gas_y=gas_y_ptb)
    for t in mb.t:
        perturbInputs(flowsheet, t, Solid_M=691.4)

    # should put this in a dedicated ~initialize~ function
    # that also intelligently initializes the model after perturbation
    mb.eq_d4.deactivate()
    mb.eq_d5.deactivate()
    mb.eq_d8.deactivate()
    mb.eq_d9.deactivate()
    mb.eq_d10.deactivate()
    mb.eq_g7.deactivate()
    mb.eq_g8.deactivate()
    mb.eq_g10.deactivate()
    mb.eq_g11.deactivate()
    mb.eq_g12.deactivate()
    mb.eq_g13.deactivate()
    mb.eq_g14.deactivate()
    mb.eq_g4.deactivate()
    mb.eq_g5.deactivate()
    mb.eq_g2.deactivate()
    mb.Tg_GW.fix(0.0)
    mb.Tw_GW.fix(0.0)
    mb.Tg_refractory.fix(0.0)
    mb.Tw_Wamb.fix()
    mb.Tw.fix()
    mb.Nuw.fix()
    mb.Nu_ext.fix()
    mb.hw.fix()
    mb.hext.fix()
    mb.hext2.fix()
    mb.U.fix()
    mb.Uw.fix()
    mb.Pr_ext.fix()
    mb.Ra.fix()
    mb.Re.fix()
    ###

    # other tentatively unused variables:
    mb.mFe_mAl.fix(0.0)
    mb.Solid_Out_M_Comp.fix()

    mb.eq_c5.deactivate()

    # Create a solver
    tol = 1e-8
    opt = SolverFactory('ipopt')
    opt.options = {
        'tol': tol,
        'linear_solver': 'ma57',
        'bound_push': 1e-8,
        'max_cpu_time': 600,
        'print_level': 5
    }
    #'halt_on_ampl_error': 'yes'}

    # initialized at steady state, works regardless:
    flowsheet.strip_bounds()

    #for z in mb.z:
    #    for t in mb.t:
    #        mb.Cg[z,'CH4',t].setlb(1e-8)

    for t in mb.t:
        alg_update(flowsheet, t)
        update_time_derivatives(flowsheet, t)

    print_violated_constraints(flowsheet, tol)

    nlp_ss = PyomoNLP(ss_flowsheet)
    x_ss = get_vector_from_flowsheet(nlp_ss, ss_flowsheet)
    jac_ss = nlp_ss.jacobian_g(x_ss)

    print('calculating steady state condition number...')
    ss_condition = np.linalg.cond(jac_ss.toarray())
    print('steady state condition number: ', ss_condition)

    fig1, ax1 = plt.subplots()
    ax1.jac_ss = plt.spy(jac_ss)
    ax1.set_facecolor('none')
    fig1.savefig('jac_ss.png', facecolor='none',
                 edgecolor='none')  #'#f2f2f2',edgecolor='none')

    nlp = PyomoNLP(flowsheet)
    v_order = nlp.variable_order()
    c_order = nlp.constraint_order()
    x = get_vector_from_flowsheet(nlp, flowsheet)
    lam = nlp.create_vector_y()

    jac_c = nlp.jacobian_g(x)
    hess_lag = nlp.hessian_lag(x, lam)
    kkt = BlockSymMatrix(2)
    kkt[0, 0] = hess_lag
    kkt[1, 0] = jac_c
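    # The kkt object above corresponds to the symmetric saddle-point system
    # [[H, J^T], [J, 0]] (assuming BlockSymMatrix mirrors the lower
    # off-diagonal block). Sketch only: a dense equivalent built with calls
    # already used in this script (todense/toarray/np.linalg.cond), handy
    # for inspecting the conditioning of the full KKT matrix.
    H_dense = np.asarray(hess_lag.todense())
    J_dense = jac_c.toarray()
    kkt_dense = np.block([[H_dense, J_dense.T],
                          [J_dense, np.zeros((J_dense.shape[0],
                                              J_dense.shape[0]))]])
    print('KKT (dense) condition number:', np.linalg.cond(kkt_dense))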

    fig2, ax2 = plt.subplots()
    ax2.jac_c = plt.spy(jac_c)
    ax2.set_facecolor('none')
    fig2.savefig('jac_c.png', facecolor='none', edgecolor='none')

    #MA27 = hsl.MA27_LinearSolver()
    #jac_row_fortran = np.zeros(jac_c.nnz,dtype=np.intc)
    #jac_col_fortran = np.zeros(jac_c.nnz,dtype=np.intc)
    #values = jac_c.data
    #for i in range(0,jac_c.nnz):
    #    jac_row_fortran[i] = int(jac_c.row[i] + 1)
    #    jac_col_fortran[i] = int(jac_c.col[i] + 1)
    #print('Doing symbolic factorization...')
    #MA27.DoSymbolicFactorization(nlp.nx,jac_row_fortran,jac_col_fortran)

    #print(jac_row_fortran)
    #print(jac_col_fortran)
    #print('Doing numeric factorization...')
    #num_status = MA27.DoNumericFactorization(nlp.nx,values)
    #print('Status: ',num_status)

    #jac_indices = range(0,jac_c.nnz)
    #for i in range(0,jac_c.nnz):
    #    if np.abs(values[i]) <= 1e-6:
    #        print('%0.2e'%values[i],str(jac_indices[i])+'-th nonzero.',jac_c.row[i],jac_c.col[i],
    #                c_order[jac_c.row[i]],v_order[jac_c.col[i]])

    #plot_switch = 0
    #if plot_switch == 1:
    #    fig,ax = plt.subplots()
    #    jac_value_plot = ax.bar(jac_indices,values)
    #    ax.set_yscale('log')
    #    fig.savefig('plots/jac_values.png')

    print('calculating condition number...')
    condition = np.linalg.cond(jac_c.toarray())
    print('condition number: ', condition)

    #mb.input_objective = Objective(expr=sum((mb.Solid_In_M[t] -601.4)**2 for t in mb.t))

    flowsheet.write('fs_dyn.nl')

    #with open('factorized_fs.txt','w') as f:
    #    flowsheet.display(ostream=f)

    return flowsheet
Example #12
    @classmethod
    def setUpClass(cls):
        # test problem 1
        cls.p1 = create_basic_model()
        cls.nlp1 = PyomoNLP(cls.p1)
        # test problem 2
        cls.p2 = create_rosenbrock_model(10)
        cls.nlp2 = PyomoNLP(cls.p2)
Example #13
from pyomo.pysp.ef import create_ef_instance

# define and initialize the SP
instance_factory = ScenarioTreeInstanceFactory(pysp_instance_creation_callback,
                                               nx_scenario_tree)
options = ScenarioTreeManagerFactory.register_options()
options.scenario_tree_manager = 'serial'
sp = ScenarioTreeManagerFactory(options, factory=instance_factory)
sp.initialize()

instance = create_ef_instance(sp.scenario_tree)

#instance = create_model(1.0)
print("\nHi this is PyNumero")
nlp = PyomoNLP(instance)
print("\n----------------------")
print("Problem statistics:")
print("----------------------")
print("Number of variables: {:>25d}".format(nlp.nx))
print("Number of equality constraints: {:>14d}".format(nlp.nc))
print("Number of inequality constraints: {:>11d}".format(nlp.nd))
print("Total number of constraints: {:>17d}".format(nlp.ng))
print("Number of nnz in Jacobian: {:>20d}".format(nlp.nnz_jacobian_g))
print("Number of nnz in hessian of Lagrange: {:>8d}".format(
    nlp.nnz_hessian_lag))

x = nlp.x_init()
y = nlp.create_vector_y()
y.fill(1.0)
Example #14
# define and initialize the SP
instance_factory = ScenarioTreeInstanceFactory(
    pysp_instance_creation_callback,
    nx_scenario_tree)
options = ScenarioTreeManagerFactory.register_options()
options.scenario_tree_manager = 'serial'
sp = ScenarioTreeManagerFactory(options,
                                factory=instance_factory)
sp.initialize()

instance = create_ef_instance(sp.scenario_tree)

#instance = create_model(1.0)
print("\nHi this is PyNumero")
nlp = PyomoNLP(instance)
print("\n----------------------")
print("Problem statistics:")
print("----------------------")
print("Number of variables: {:>25d}".format(nlp.nx))
print("Number of equality constraints: {:>14d}".format(nlp.nc))
print("Number of inequality constraints: {:>11d}".format(nlp.nd))
print("Total number of constraints: {:>17d}".format(nlp.ng))
print("Number of nnz in Jacobian: {:>20d}".format(nlp.nnz_jacobian_g))
print("Number of nnz in hessian of Lagrange: {:>8d}".format(nlp.nnz_hessian_lag))

x = nlp.x_init()
y = nlp.create_vector_y()
y.fill(1.0)

# Evaluate jacobian of all constraints
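
A minimal continuation for that last comment, assuming the same jacobian_g call and nnz attributes used in Examples #11 and #13 (a sketch, not part of the original script):

jac_g = nlp.jacobian_g(x)   # Jacobian of all constraints evaluated at x
print("Jacobian nonzeros: {}".format(jac_g.nnz))
print("Hessian of Lagrangian nonzeros: {}".format(nlp.nnz_hessian_lag))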