Example #1
    def init_mode(self, solver):
        """
        Initiates the new mode.
        """
        if self._initiate_problem:
            #Check whether or not the problem involves event functions
            if self._g_nbr > 0:
                self._model.sw = [int(x) for x in solver.sw]
            if self._g0_nbr > 0:
                self._model.sw_init = [int(x) for x in self.switches_init]

            #Initiate using IPOPT
            init_nlp = NLPInitialization(self._model)
            init_nlp_ipopt = InitializationOptimizer(init_nlp)
            init_nlp_ipopt.init_opt_ipopt_solve()
            
            #Sets the calculated values
            solver.y = N.append(self._model.real_x,self._model.real_w)
            solver.yd = N.append(self._model.real_dx,[0]*len(self._model.real_w)) 
        else:
            self._model.sw = [int(x) for x in solver.sw]
            
            if self.log_events:
                self._log_initiate_mode = True #Log f evaluations
                i = len(self._log_information) #Where to put the information
            try:
                solver.make_consistent('IDA_YA_YDP_INIT') #Calculate consistency
                self._log.debug(
                    ' Calculation of consistent initial conditions: True')
            except Sundials_Exception as data:
                print(data)
                print('Failed to calculate initial conditions. Trying to continue...')
                self._log.debug(
                    ' Calculation of consistent initial conditions: False')
            
            self._log_initiate_mode = False #Stop logging f
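The IPOPT branch of init_mode above uses the same initialization machinery that the tests below exercise. A minimal standalone sketch of that workflow, assuming a compiled DAEInitTest.jmu is available in the working directory and that the usual JModelica classes are already imported (exact module paths vary between releases, so they are only indicated in the comment):

import numpy as N

# Assumes JMUModel, NLPInitialization and InitializationOptimizer are imported
# from the JModelica/pyjmi distribution, as in the test modules below; the
# exact import paths differ between releases.
model = JMUModel('DAEInitTest.jmu')

# Formulate the DAE initialization problem and solve it with IPOPT.
init_nlp = NLPInitialization(model)
init_nlp_ipopt = InitializationOptimizer(init_nlp)
init_nlp_ipopt.init_opt_ipopt_solve()

# Assemble consistent initial states and derivatives the same way init_mode
# does before handing them to the integrator.
y0 = N.append(model.real_x, model.real_w)
yd0 = N.append(model.real_dx, [0] * len(model.real_w))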
Example #2
class TestNLPInit:
    """ Test evaluation of function in NLPInitialization and solution
    of initialization problems.
    
    """
    @classmethod
    def setUpClass(cls):
        """Sets up the test class."""
        fpath_daeinit = os.path.join(get_files_path(), 'Modelica',
                                     'DAEInitTest.mo')
        cpath_daeinit = "DAEInitTest"
        compile_jmu(cpath_daeinit,
                    fpath_daeinit,
                    compiler_options={
                        'state_start_values_fixed': True,
                        'variability_propagation': False
                    })

    def setUp(self):
        """Test setUp. Load the test model."""
        # Load the dynamic library and XML data
        cpath_daeinit = "DAEInitTest"
        fname_daeinit = cpath_daeinit.replace('.', '_', 1)
        self.dae_init_test = JMUModel(fname_daeinit + '.jmu')

        # This is to check that values set in the model prior to
        # creation of the NLPInitialization object are used as an
        # initial guess.
        self.dae_init_test.set('y1', 0.3)

        self.init_nlp = NLPInitialization(self.dae_init_test)
        self.init_nlp_ipopt = InitializationOptimizer(self.init_nlp)

    @testattr(ipopt=True)
    def test_init_opt_get_dimensions(self):
        """ Test NLPInitialization.init_opt_get_dimensions"""

        res_n_x = 8
        res_n_h = 8
        res_dh_n_nz = 17

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        assert N.abs(res_n_x-n_x) + N.abs(res_n_h-n_h) + \
               N.abs(res_dh_n_nz-dh_n_nz)==0

    @testattr(ipopt=True)
    def test_init_opt_get_set_x_init(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_get_x_init
        res_x_init = N.array([0, 0, 3, 4, 1, 0, 0, 0])
        x_init = N.zeros(n_x)
        self.init_nlp.init_opt_get_initial(x_init)
        #print x_init
        assert N.sum(N.abs(res_x_init - x_init)) < 1e-3

        # Test init_opt_set_x_init
        res_x_init = N.ones(n_x)
        x_init = N.ones(n_x)
        self.init_nlp.init_opt_set_initial(x_init)
        self.init_nlp.init_opt_get_initial(x_init)
        assert N.sum(N.abs(res_x_init - x_init)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_get_set_bounds(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_get_bounds
        res_x_lb = -1e20 * N.ones(n_x)
        res_x_ub = 1e20 * N.ones(n_x)
        x_lb = N.zeros(n_x)
        x_ub = N.zeros(n_x)
        self.init_nlp.init_opt_get_bounds(x_lb, x_ub)
        assert N.sum(N.abs(res_x_lb - x_lb)) < 1e-3
        assert N.sum(N.abs(res_x_ub - x_ub)) < 1e-3

        # Test init_opt_set_bounds
        res_x_lb = -5000 * N.ones(n_x)
        res_x_ub = 5000 * N.ones(n_x)
        x_lb = -5000 * N.ones(n_x)
        x_ub = 5000 * N.ones(n_x)
        self.init_nlp.init_opt_set_bounds(x_lb, x_ub)
        self.init_nlp.init_opt_get_bounds(x_lb, x_ub)
        assert N.sum(N.abs(res_x_lb - x_lb)) < 1e-3
        assert N.sum(N.abs(res_x_ub - x_ub)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_f(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_f
        res_f = N.array([0.0])
        f = N.zeros(1)
        self.init_nlp.init_opt_f(f)
        #print f
        assert N.sum(N.abs(res_f - f)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_df(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_df
        res_df = N.zeros(n_x)
        df = N.ones(n_x)
        self.init_nlp.init_opt_df(df)
        #print df
        assert N.sum(N.abs(res_df - df)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_h(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()
        # Test init_opt_h
        res_h = N.array([
            -1.98158529e+02, -2.43197505e-01, 5.12000000e+02, 5.00000000e+00,
            1.41120008e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00
        ])
        h = N.zeros(n_h)
        self.init_nlp.init_opt_h(h)
        #print h
        assert N.sum(N.abs(res_h - h)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_dh(self):
        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_dh
        res_dh = N.array([
            -1.,
            -1.,
            -135.,
            192.,
            -0.9899925,
            -1.,
            -48.,
            0.65364362,
            -1.,
            0.54030231,
            -2.,
            -1.,
            -1.,
            0.9899925,
            192.,
            -1.,
            -1.,
        ])
        dh = N.ones(dh_n_nz)
        self.init_nlp.init_opt_dh(dh)
        #print dh
        assert N.sum(N.abs(res_dh - dh)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_dh_nz_indices(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_dh_nz_indices
        res_dh_irow = N.array(
            [1, 2, 1, 3, 5, 7, 1, 2, 8, 1, 2, 6, 3, 5, 3, 4, 5])
        res_dh_icol = N.array(
            [1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8])
        dh_irow = N.zeros(dh_n_nz, dtype=N.int32)
        dh_icol = N.zeros(dh_n_nz, dtype=N.int32)
        self.init_nlp.init_opt_dh_nz_indices(dh_irow, dh_icol)
        assert N.sum(N.abs(res_dh_irow - dh_irow)) < 1e-3
        assert N.sum(N.abs(res_dh_icol - dh_icol)) < 1e-3

    @testattr(ipopt=True)
    def test_init_opt_solve(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # self.init_nlp_ipopt.init_opt_ipopt_set_string_option("derivative_test","first-order")

        self.init_nlp_ipopt.init_opt_ipopt_solve()

        print(self.dae_init_test.z)

        res_Z = N.array([
            5., -198.1585290151921, -0.2431975046920718, 3.0, 4.0, 1.0, 2197.0,
            5.0, -0.92009689684513785, 0., 0, 0, 0, 0, 0, 0, 0, 0
        ])

        assert max(N.abs(res_Z - self.dae_init_test.z)) < 1e-3

    @testattr(ipopt=True)
    def test_statistics(self):
        """ Test of 'jmi_init_opt_get_statistics'.
        """
        # Solve the optimization problem
        self.init_nlp_ipopt.init_opt_ipopt_solve()
        (return_status, iters, cost,
         time) = self.init_nlp_ipopt.init_opt_ipopt_get_statistics()

        assert return_status == 0
        assert abs(cost - 2.4134174e+06) < 1

    @testattr(ipopt=True)
    def test_init_opt_write_result(self):

        cpath_daeinit = "DAEInitTest"
        fname_daeinit = cpath_daeinit.replace('.', '_', 1)

        # self.init_nlp_ipopt.init_opt_ipopt_set_string_option("derivative_test","first-order")

        self.init_nlp_ipopt.init_opt_ipopt_solve()

        self.init_nlp.export_result_dymola()

        res = ResultDymolaTextual(fname_daeinit + "_result.txt")

        res_Z = N.array([
            5., -198.1585290151921, -0.2431975046920718, 3.0, 4.0, 1.0, 2197.0,
            5.0, -0.92009689684513785, 0.
        ])

        assert N.abs(res_Z[0] - res.get_variable_data("p").x[0]) < 1e-3
        assert N.abs(res_Z[1] - res.get_variable_data("der(x1)").x[0]) < 1e-3
        assert N.abs(res_Z[2] - res.get_variable_data("der(x2)").x[0]) < 1e-3
        assert N.abs(res_Z[3] - res.get_variable_data("x1").x[0]) < 1e-3
        assert N.abs(res_Z[4] - res.get_variable_data("x2").x[0]) < 1e-3
        assert N.abs(res_Z[5] - res.get_variable_data("u").x[0]) < 1e-3
        assert N.abs(res_Z[6] - res.get_variable_data("y1").x[0]) < 1e-3
        assert N.abs(res_Z[7] - res.get_variable_data("y2").x[0]) < 1e-3
        assert N.abs(res_Z[8] - res.get_variable_data("y3").x[0]) < 1e-3

    @testattr(ipopt=True)
    def test_invalid_string_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(
            Exception, self.init_nlp_ipopt.init_opt_ipopt_set_string_option,
            'invalid_option', 'val')

    @testattr(ipopt=True)
    def test_invalid_int_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(
            Exception, self.init_nlp_ipopt.init_opt_ipopt_set_int_option,
            'invalid_option', 1)

    @testattr(ipopt=True)
    def test_invalid_num_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(
            Exception, self.init_nlp_ipopt.init_opt_ipopt_set_num_option,
            'invalid_option', 1.0)
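Taken together, the tests above cover the complete low-level initialization workflow. A condensed usage sketch, assuming the same helper names as the test module (compile_jmu, get_files_path, JMUModel, NLPInitialization, InitializationOptimizer, ResultDymolaTextual are imported as above); it mirrors setUpClass, setUp and the solve/statistics/export tests:

import os

# Compile the test model into a JMU (same options as setUpClass above).
fpath = os.path.join(get_files_path(), 'Modelica', 'DAEInitTest.mo')
compile_jmu("DAEInitTest", fpath,
            compiler_options={'state_start_values_fixed': True,
                              'variability_propagation': False})

# Load the model and formulate the initialization NLP.
model = JMUModel('DAEInitTest.jmu')
init_nlp = NLPInitialization(model)
init_nlp_ipopt = InitializationOptimizer(init_nlp)

# Problem dimensions: variables, residuals and Jacobian non-zeros.
n_x, n_h, dh_n_nz = init_nlp.init_opt_get_dimensions()

# Solve, inspect the IPOPT statistics and export the result.
init_nlp_ipopt.init_opt_ipopt_solve()
return_status, iters, cost, time = init_nlp_ipopt.init_opt_ipopt_get_statistics()
init_nlp.export_result_dymola()
res = ResultDymolaTextual('DAEInitTest_result.txt')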
Example #3
    def test_linearization(self):

        # Load the dynamic library and XML data
        model = JMUModel(fname + '.jmu')

        # Create DAE initialization object.
        init_nlp = NLPInitialization(model)

        # Create an Ipopt solver object for the DAE initialization system
        init_nlp_ipopt = InitializationOptimizer(init_nlp)

        # Solve the DAE initialization system with Ipopt
        init_nlp_ipopt.init_opt_ipopt_solve()

        (E_dae,A_dae,B_dae,F_dae,g_dae,state_names,input_names,algebraic_names, \
         dx0,x0,u0,w0,t0) = linearize_dae(model)

        (A_ode, B_ode, g_ode, H_ode, M_ode,
         q_ode) = linear_dae_to_ode(E_dae, A_dae, B_dae, F_dae, g_dae)

        (A_ode2,B_ode2,g_ode2,H_ode2,M_ode2,q_ode2,state_names2,input_names2,algebraic_names2, \
         dx02,x02,u02,w02,t02) = linearize_ode(model)

        N.testing.assert_array_almost_equal(
            A_ode, A_ode2, err_msg="Error in linearization: A_ode.")
        N.testing.assert_array_almost_equal(
            B_ode, B_ode2, err_msg="Error in linearization: B_ode.")
        N.testing.assert_array_almost_equal(
            g_ode, g_ode2, err_msg="Error in linearization: g_ode.")
        N.testing.assert_array_almost_equal(
            H_ode, H_ode2, err_msg="Error in linearization: H_ode.")
        N.testing.assert_array_almost_equal(
            M_ode, M_ode2, err_msg="Error in linearization: M_ode.")
        N.testing.assert_array_almost_equal(
            q_ode, q_ode2, err_msg="Error in linearization: q_ode.")
        assert (state_names == state_names2) == True
        assert (input_names == input_names2) == True
        assert (algebraic_names == algebraic_names2) == True

        small = 1e-4
        assert (
            N.abs(A_ode -
                  N.array([[-0.00000000e+00, 1.00000000e+03, 6.00000000e+01],
                           [-0.00000000e+00, -1.66821993e-02, -1.19039519e+00],
                           [-0.00000000e+00, 3.48651310e-03, 2.14034026e-01]]))
            <= small).all() == True
        assert (N.abs(
            B_ode -
            N.array([[1.00000000e+02], [-0.00000000e+00], [3.49859575e-02]]))
                <= small).all() == True
        assert (N.abs(g_ode - N.array([[-0.], [-0.], [-0.]])) <=
                small).all() == True

        assert (N.abs(
            E_dae - N.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])) <=
                small).all() == True
        assert (N.abs(
            A_dae -
            N.array([[-0.00000000e+00, -1.00000000e+03, -6.00000000e+01],
                     [-0.00000000e+00, 1.66821993e-02, 1.19039519e+00],
                     [-0.00000000e+00, -3.48651310e-03, -2.14034026e-01]])) <=
                small).all() == True
        assert (N.abs(
            B_dae -
            N.array([[-1.00000000e+02], [-0.00000000e+00], [-3.49859575e-02]]))
                <= small).all() == True
        assert (N.abs(g_dae - N.array([[-0.], [-0.], [-0.]])) <=
                small).all() == True

        assert (state_names == ['cost', 'cstr.c', 'cstr.T']) == True
        assert (input_names == ['u']) == True
        assert (algebraic_names == []) == True
Example #4
    def test_linearization(self):

        # Load the dynamic library and XML data
        model = JMUModel(fname + ".jmu")

        # Create DAE initialization object.
        init_nlp = NLPInitialization(model)

        # Create an Ipopt solver object for the DAE initialization system
        init_nlp_ipopt = InitializationOptimizer(init_nlp)

        # Solve the DAE initialization system with Ipopt
        init_nlp_ipopt.init_opt_ipopt_solve()

        (
            E_dae,
            A_dae,
            B_dae,
            F_dae,
            g_dae,
            state_names,
            input_names,
            algebraic_names,
            dx0,
            x0,
            u0,
            w0,
            t0,
        ) = linearize_dae(model)

        (A_ode, B_ode, g_ode, H_ode, M_ode, q_ode) = linear_dae_to_ode(E_dae, A_dae, B_dae, F_dae, g_dae)

        (
            A_ode2,
            B_ode2,
            g_ode2,
            H_ode2,
            M_ode2,
            q_ode2,
            state_names2,
            input_names2,
            algebraic_names2,
            dx02,
            x02,
            u02,
            w02,
            t02,
        ) = linearize_ode(model)

        N.testing.assert_array_almost_equal(A_ode, A_ode2, err_msg="Error in linearization: A_ode.")
        N.testing.assert_array_almost_equal(B_ode, B_ode2, err_msg="Error in linearization: B_ode.")
        N.testing.assert_array_almost_equal(g_ode, g_ode2, err_msg="Error in linearization: g_ode.")
        N.testing.assert_array_almost_equal(H_ode, H_ode2, err_msg="Error in linearization: H_ode.")
        N.testing.assert_array_almost_equal(M_ode, M_ode2, err_msg="Error in linearization: M_ode.")
        N.testing.assert_array_almost_equal(q_ode, q_ode2, err_msg="Error in linearization: q_ode.")
        assert (state_names == state_names2) == True
        assert (input_names == input_names2) == True
        assert (algebraic_names == algebraic_names2) == True

        small = 1e-4
        assert (
            N.abs(
                A_ode
                - N.array(
                    [
                        [-0.00000000e00, 1.00000000e03, 6.00000000e01],
                        [-0.00000000e00, -1.66821993e-02, -1.19039519e00],
                        [-0.00000000e00, 3.48651310e-03, 2.14034026e-01],
                    ]
                )
            )
            <= small
        ).all() == True
        assert (N.abs(B_ode - N.array([[1.00000000e02], [-0.00000000e00], [3.49859575e-02]])) <= small).all() == True
        assert (N.abs(g_ode - N.array([[-0.0], [-0.0], [-0.0]])) <= small).all() == True

        assert (
            N.abs(
                E_dae - N.array([[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0]])
            )
            <= small
        ).all() == True
        assert (
            N.abs(
                A_dae
                - N.array(
                    [
                        [-0.00000000e00, -1.00000000e03, -6.00000000e01],
                        [-0.00000000e00, 1.66821993e-02, 1.19039519e00],
                        [-0.00000000e00, -3.48651310e-03, -2.14034026e-01],
                        [-0.00000000e00, -0.00000000e00, -0.00000000e00],
                    ]
                )
            )
            <= small
        ).all() == True
        assert (
            N.abs(B_dae - N.array([[-0.00000000e00], [-0.00000000e00], [-0.00000000e00], [1.00000000e00]])) <= small
        ).all() == True
        assert (N.abs(g_dae - N.array([[-0.0], [-0.0], [-0.0], [-0.0]])) <= small).all() == True

        assert (state_names == ["cost", "cstr.c", "cstr.T"]) == True
        assert (input_names == ["u"]) == True
        assert (algebraic_names == ["cstr.Tc"]) == True
Example #5
class TestNLPInit:
    """ Test evaluation of function in NLPInitialization and solution
    of initialization problems.
    
    """
    @classmethod
    def setUpClass(cls):
        """Sets up the test class."""
        fpath_daeinit = os.path.join(get_files_path(), 'Modelica', 
            'DAEInitTest.mo')
        cpath_daeinit = "DAEInitTest"
        compile_jmu(cpath_daeinit, fpath_daeinit, 
            compiler_options={'state_start_values_fixed':True, 'variability_propagation':False})
        
    def setUp(self):
        """Test setUp. Load the test model."""                    
        # Load the dynamic library and XML data
        cpath_daeinit = "DAEInitTest"
        fname_daeinit = cpath_daeinit.replace('.','_',1)
        self.dae_init_test = JMUModel(fname_daeinit+'.jmu')

        # This is to check that values set in the model prior to
        # creation of the NLPInitialization object are used as an
        # initial guess.
        self.dae_init_test.set('y1',0.3)
    
        self.init_nlp = NLPInitialization(self.dae_init_test)
        self.init_nlp_ipopt = InitializationOptimizer(self.init_nlp)


    @testattr(ipopt = True)    
    def test_init_opt_get_dimensions(self):
        """ Test NLPInitialization.init_opt_get_dimensions"""
    
        res_n_x = 8
        res_n_h = 8
        res_dh_n_nz = 17
    
        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()
    
        assert N.abs(res_n_x-n_x) + N.abs(res_n_h-n_h) + \
               N.abs(res_dh_n_nz-dh_n_nz)==0

    @testattr(ipopt = True)    
    def test_init_opt_get_set_x_init(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()
    
        # Test init_opt_get_x_init
        res_x_init = N.array([0,0,3,4,1,0,0,0])
        x_init = N.zeros(n_x)
        self.init_nlp.init_opt_get_initial(x_init)
        #print x_init
        assert N.sum(N.abs(res_x_init-x_init))<1e-3 
    
        # Test init_opt_set_x_init
        res_x_init = N.ones(n_x)
        x_init = N.ones(n_x)
        self.init_nlp.init_opt_set_initial(x_init)
        self.init_nlp.init_opt_get_initial(x_init)
        assert N.sum(N.abs(res_x_init-x_init))<1e-3 

    @testattr(ipopt = True)    
    def test_init_opt_get_set_bounds(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_get_bounds
        res_x_lb = -1e20*N.ones(n_x)
        res_x_ub = 1e20*N.ones(n_x)
        x_lb = N.zeros(n_x)
        x_ub = N.zeros(n_x)
        self.init_nlp.init_opt_get_bounds(x_lb,x_ub)
        assert N.sum(N.abs(res_x_lb-x_lb))<1e-3
        assert N.sum(N.abs(res_x_ub-x_ub))<1e-3
    
        # Test init_opt_set_bounds
        res_x_lb = -5000*N.ones(n_x)
        res_x_ub = 5000*N.ones(n_x)
        x_lb = -5000*N.ones(n_x)
        x_ub = 5000*N.ones(n_x)
        self.init_nlp.init_opt_set_bounds(x_lb,x_ub)
        self.init_nlp.init_opt_get_bounds(x_lb,x_ub)
        assert N.sum(N.abs(res_x_lb-x_lb))<1e-3
        assert N.sum(N.abs(res_x_ub-x_ub))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_f(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()
    
        # Test init_opt_f
        res_f = N.array([0.0])
        f = N.zeros(1)
        self.init_nlp.init_opt_f(f)
        #print f
        assert N.sum(N.abs(res_f-f))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_df(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_df
        res_df = N.zeros(n_x)
        df = N.ones(n_x)
        self.init_nlp.init_opt_df(df)
        #print df
        assert N.sum(N.abs(res_df-df))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_h(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()
        # Test init_opt_h
        res_h = N.array([ -1.98158529e+02,  -2.43197505e-01,   5.12000000e+02,   5.00000000e+00,
                          1.41120008e-01,   0.00000000e+00,   0.00000000e+00,   0.00000000e+00])
        h = N.zeros(n_h)
        self.init_nlp.init_opt_h(h)
        #print h
        assert N.sum(N.abs(res_h-h))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_dh(self):
        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_dh
        res_dh = N.array([ -1.,           -1.,         -135.,          192.,           -0.9899925,    -1.,
                           -48.,            0.65364362,   -1.,            0.54030231,   -2.,           -1.,
                           -1.,            0.9899925,   192.,           -1.,           -1.,        ])
        dh = N.ones(dh_n_nz)
        self.init_nlp.init_opt_dh(dh)
        #print dh
        assert N.sum(N.abs(res_dh-dh))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_dh_nz_indices(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

        # Test init_opt_dh_nz_indices
        res_dh_irow = N.array([1, 2, 1, 3, 5, 7, 1, 2, 8, 1, 2, 6, 3, 5, 3, 4, 5])
        res_dh_icol = N.array([1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8])
        dh_irow = N.zeros(dh_n_nz,dtype=N.int32)
        dh_icol = N.zeros(dh_n_nz,dtype=N.int32)
        self.init_nlp.init_opt_dh_nz_indices(dh_irow,dh_icol)
        assert N.sum(N.abs(res_dh_irow-dh_irow))<1e-3
        assert N.sum(N.abs(res_dh_icol-dh_icol))<1e-3

    @testattr(ipopt = True)    
    def test_init_opt_solve(self):

        n_x, n_h, dh_n_nz = self.init_nlp.init_opt_get_dimensions()

    
        # self.init_nlp_ipopt.init_opt_ipopt_set_string_option("derivative_test","first-order")
        
        self.init_nlp_ipopt.init_opt_ipopt_solve()

        print(self.dae_init_test.z)
    
        res_Z = N.array([5.,
                         -198.1585290151921,
                         -0.2431975046920718,
                         3.0,
                         4.0,
                         1.0,
                         2197.0,
                         5.0,
                         -0.92009689684513785,
                         0.,0,0,0,0,0,0,0,0])
    
        assert max(N.abs(res_Z-self.dae_init_test.z))<1e-3

    @testattr(ipopt = True)
    def test_statistics(self):
        """ Test of 'jmi_init_opt_get_statistics'.
        """
        # Solve the optimization problem
        self.init_nlp_ipopt.init_opt_ipopt_solve()
        (return_status,iters,cost,time) = self.init_nlp_ipopt.init_opt_ipopt_get_statistics()

        assert return_status==0
        assert abs(cost-2.4134174e+06)<1

        
    @testattr(ipopt = True)    
    def test_init_opt_write_result(self):

        cpath_daeinit = "DAEInitTest"
        fname_daeinit = cpath_daeinit.replace('.','_',1)
    
        # self.init_nlp_ipopt.init_opt_ipopt_set_string_option("derivative_test","first-order")
        
        self.init_nlp_ipopt.init_opt_ipopt_solve()

        self.init_nlp.export_result_dymola()
        
        res = ResultDymolaTextual(fname_daeinit + "_result.txt")

        res_Z = N.array([5.,
                         -198.1585290151921,
                         -0.2431975046920718,
                         3.0,
                         4.0,
                         1.0,
                         2197.0,
                         5.0,
                         -0.92009689684513785,
                         0.])

        assert N.abs(res_Z[0] - res.get_variable_data("p").x[0])<1e-3 
        assert N.abs(res_Z[1] - res.get_variable_data("der(x1)").x[0])<1e-3
        assert N.abs(res_Z[2] - res.get_variable_data("der(x2)").x[0])<1e-3
        assert N.abs(res_Z[3] - res.get_variable_data("x1").x[0])<1e-3
        assert N.abs(res_Z[4] - res.get_variable_data("x2").x[0])<1e-3
        assert N.abs(res_Z[5] - res.get_variable_data("u").x[0])<1e-3
        assert N.abs(res_Z[6] - res.get_variable_data("y1").x[0])<1e-3
        assert N.abs(res_Z[7] - res.get_variable_data("y2").x[0])<1e-3
        assert N.abs(res_Z[8] - res.get_variable_data("y3").x[0])<1e-3
        
    @testattr(ipopt = True)
    def test_invalid_string_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(Exception, self.init_nlp_ipopt.init_opt_ipopt_set_string_option, 'invalid_option','val')

    @testattr(ipopt = True)
    def test_invalid_int_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(Exception, self.init_nlp_ipopt.init_opt_ipopt_set_int_option, 'invalid_option',1)

    @testattr(ipopt = True)
    def test_invalid_num_option(self):
        """Test that exceptions are thrown when invalid IPOPT options are set."""
        nose.tools.assert_raises(Exception, self.init_nlp_ipopt.init_opt_ipopt_set_num_option, 'invalid_option',1.0)