Example #1
0
    def test_restore(self):
        """Run a model while recording, restore a fresh model instance from
        the last recorded case, and verify the restored run reproduces the
        converged solution in far fewer cases."""
        # Restore from case, run, verify outputs match expected.
        top = set_as_top(SellarMDF())
        #top.name = 'top'
        top.recorders = [JSONCaseRecorder()]
        top.run()
        # Converged values for the Sellar MDF reference problem.
        assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
        assert_rel_error(self, top.half.z2a, 0., .0001)
        assert_rel_error(self, top.sub.x1, 0., .0001)
        assert_rel_error(self, top.sub.states.y[0], 3.160004, .0001)
        assert_rel_error(self, top.sub.states.y[1], 3.755280, .0001)
        assert_rel_error(self, top.driver.eval_objective(), 3.18339413394,
                         .0001)

        cds = CaseDataset('cases.json', 'json')
        cases = cds.data.fetch()
        n_orig = len(cases)  # Typically 142

        # Build a fresh model and restore its state from the last case.
        top = set_as_top(SellarMDF())
        top._setup()
        cds.restore(top, cases[-1]['_id'])
        top.recorders = [JSONCaseRecorder('cases.restored')]
        top.run()
        assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
        assert_rel_error(self, top.half.z2a, 0., .0001)
        assert_rel_error(self, top.sub.x1, 0., .0001)
        assert_rel_error(self, top.sub.states.y[0], 3.160000, .0001)
        assert_rel_error(self, top.sub.states.y[1], 3.755278, .0001)
        assert_rel_error(self, top.driver.eval_objective(), 3.18339397762,
                         .0001)

        cases = CaseDataset('cases.restored', 'json').data.fetch()
        # Exact case counts are unreliable, just assure restore was quicker.
        self.assertTrue(len(cases) < n_orig / 4)  # Typically 15
Example #2
0
    def test_vtree(self):
        """Record a model whose component I/O uses variable trees and
        compare the JSON output against the 'vtree.json' baseline."""
        top = Assembly()
        sub = top.add('sub', Assembly())
        sub.add('comp', LoadsComp())
        sub.driver.workflow.add('comp')
        # Expose the component's variable trees on the sub-assembly boundary.
        sub.create_passthrough('comp.loads_in')
        sub.create_passthrough('comp.loads_out')
        top.driver.workflow.add('sub')

        sout = StringIO()
        top.recorders = [JSONCaseRecorder(sout)]

        # Build a small variable-tree input value.
        loads = Loads()
        loads.Fx = [1, 2, 3]
        loads.Fy = [4, 5, 6]
        loads.Fz = [7, 8, 9]
        arr = LoadsArray()
        arr.loads = [loads]
        top.sub.loads_in = arr

        top.run()

        # Uncomment to regenerate the baseline file:
        # with open('vtree.new', 'w') as out:
        #     out.write(sout.getvalue())
        self.verify(sout, 'vtree.json')
Example #3
0
 def test_badval(self):
     """A value that cannot be JSON-serialized (here, a code object) must
     make run() fail with a descriptive RuntimeError."""
     sout = StringIO()
     self.top.recorders = [JSONCaseRecorder(sout)]
     # A code object is not JSON-serializable.
     self.top.comp1.data = self.test_badval.__func__.__code__
     assert_raises(self, 'self.top.run()', globals(), locals(), RuntimeError,
                   "JSON write failed for simulation_info.constants:"
                   " keys ['comp1.data']: <code object test_badval at")
    def test_vtree(self):
        """Record a model with variable-tree I/O to a JSON file and check
        that the recorded loads match the previously saved 'vtree.json'
        baseline file."""
        top = Assembly()
        sub = top.add('sub', Assembly())
        sub.add('comp', LoadsComp())
        sub.driver.workflow.add('comp')
        # Expose the component's variable trees on the sub-assembly boundary.
        sub.create_passthrough('comp.loads_in')
        sub.create_passthrough('comp.loads_out')
        top.driver.workflow.add('sub')

        jsonfile = os.path.join(self.tempdir, 'test_vtree.json')
        old_json_file = os.path.join(os.path.dirname(__file__), 'vtree.json')
        top.recorders = [JSONCaseRecorder(jsonfile)]

        # Build a small variable-tree input value.
        loads = Loads()
        loads.Fx = [1, 2, 3]
        loads.Fy = [4, 5, 6]
        loads.Fz = [7, 8, 9]
        arr = LoadsArray()
        arr.loads = [loads]
        top.sub.loads_in = arr

        top.run()

        cdsnew = CaseDataset(jsonfile, 'json')
        cdsold = CaseDataset(old_json_file, 'json')

        # BUG FIX: the original compared these values with bare '=='
        # expressions whose results were discarded, so the test could
        # never fail.  Assert the comparisons explicitly.
        self.assertEqual(
            cdsold.data.vars('sub.comp.loads_out').fetch()
            [0][0]['loads'][0]['Fx'],
            cdsnew.data.vars('sub.comp.loads_out').fetch()
            [0][0]['loads'][0]['Fx'])
        self.assertEqual(
            cdsold.data.vars('sub.comp.loads_out').fetch()
            [1][0]['loads'][0]['Fz'],
            cdsnew.data.vars('sub.comp.loads_out').fetch()
            [1][0]['loads'][0]['Fz'])
Example #5
0
    def test_jsonrecorder(self):
        """Record a run to an in-memory stream and compare the result with
        the 'jsonrecorder.json' baseline."""
        stream = StringIO()
        self.top.recorders = [JSONCaseRecorder(stream)]
        self.top.run()

        # Uncomment to regenerate the baseline file:
        # with open('jsonrecorder.new', 'w') as out:
        #     out.write(stream.getvalue())
        self.verify(stream, 'jsonrecorder.json')
Example #6
0
def create_files():
    """ Create/update test data files. """
    prob = set_as_top(SellarMDF())
    #prob.name = "top"
    # Record the same run in both JSON and BSON formats.
    prob.recorders = [
        JSONCaseRecorder('sellar_json.new'),
        BSONCaseRecorder('sellar_bson.new'),
    ]
    prob.run()
    def test_jsonrecorder_with_nest_object(self):
        """Verify a nested complex object survives recording intact and is
        not converted to a dict (regression test for bug 87740468)."""
        # make sure bug 87740468 is fixed
        sout = StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]

        self.top.run()
        self.assertTrue(
            isinstance(self.top.comp1.cx_nested_in.cx1,
                       ComplexClass))  # should NOT be a dict
    def test_jsonrecorder(self):
        """Record a run to an in-memory stream and compare against the
        'jsonrecorder.json' baseline file."""
        sout = StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.run()

        # Set REGEN_JSON_FILES in the environment to regenerate the baseline.
        if os.environ.get('REGEN_JSON_FILES'):
            with open('jsonrecorder.new', 'w') as out:
                out.write(sout.getvalue())
        verify_json(self, sout, 'jsonrecorder.json')
Example #9
0
    def test_jsonrecorder_norun(self):
        """Model configuration data must be recordable without calling
        run()."""
        stream = StringIO()
        self.top.recorders = [JSONCaseRecorder(stream)]
        # Emit configuration records only; the workflow never executes.
        self.top.record_configuration()

        # Uncomment to regenerate the baseline file:
        # with open('jsonrecorder_norun.new', 'w') as out:
        #      out.write(stream.getvalue())
        verify_json(self, stream, 'jsonrecorder_norun.json')
    def test_bsonrecorder(self):
        """Verify BSON output can be read back and matches the JSON output
        record-for-record (ignoring ids, timestamps and graph ordering)."""
        # Verify bson output can be read and that it matches json.
        bson_out = StringIO()
        json_out = StringIO()
        self.top.recorders = [
            BSONCaseRecorder(bson_out),
            JSONCaseRecorder(json_out)
        ]
        self.top.run()

        json_run = json.loads(json_out.getvalue())

        # The BSON stream is a sequence of length-prefixed records: a
        # little-endian uint32 length followed by the BSON document itself.
        inp = StringIO(bson_out.getvalue())
        reclen = unpack('<L', inp.read(4))[0]
        data = inp.read(reclen)
        obj = bson.loads(data)  # simulation_info
        keys = sorted(obj.keys())
        self.assertEqual(keys, sorted(json_run['simulation_info'].keys()))
        for key in keys:
            # graph sometimes serializes with nodes in different order
            # between json and bson. The graphs are still equivalent, but the
            # assertion below will fail
            if key not in (
                    'uuid',
                    'graph',
            ):
                self.assertEqual(obj[key], json_run['simulation_info'][key])

        driver_count = 1
        case_count = 1
        data = inp.read(4)
        while data:
            reclen = unpack('<L', data)[0]
            data = inp.read(reclen)
            obj = bson.loads(data)  # driver_info or iteration_case
            keys = sorted(obj.keys())

            if '_driver_id' in obj:  # iteration_case
                case = 'iteration_case_%s' % case_count
                self.assertEqual(keys, sorted(json_run[case].keys()))
                for key in keys:
                    # ids and timestamps legitimately differ between records
                    if key not in ('_driver_id', '_id', '_parent_id',
                                   'timestamp'):
                        self.assertEqual(obj[key], json_run[case][key])
                case_count += 1
            else:  # driver_info
                driver = 'driver_info_%s' % driver_count
                self.assertEqual(keys, sorted(json_run[driver].keys()))
                for key in keys:
                    if key not in ('_id', ):
                        self.assertEqual(obj[key], json_run[driver][key])
                driver_count += 1

            data = inp.read(4)
Example #11
0
    def test_jsonrecorder_norun(self):
        """ test ability to get model data from case recorder
            before calling run()
        """
        sout = StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        # Emit configuration records only; the workflow never executes.
        self.top.configure_recording()

        # Uncomment to regenerate the baseline file:
        # with open('jsonrecorder_norun.new', 'w') as out:
        #     out.write(sout.getvalue())
        self.verify(sout, 'jsonrecorder_norun.json')
    def configure(self):
        """Set up a uniform-random DOE over the Paraboloid and record all
        cases to 'doe.json'."""
        self.add('paraboloid', Paraboloid())

        self.add('driver', DOEdriver())
        # 1000 uniformly distributed sample points.
        self.driver.DOEgenerator = Uniform(1000)

        self.driver.add_parameter('paraboloid.x', low=-50, high=50)
        self.driver.add_parameter('paraboloid.y', low=-50, high=50)

        # Also record the objective value for each case.
        self.driver.add_response('paraboloid.f_xy')

        self.recorders = [JSONCaseRecorder(out='doe.json')]
    def test_simple(self):
        # Make sure the CSV file can be read and has the correct number of cases

        self.top.recorders = [JSONCaseRecorder(self.filename_json)]
        # Keep no backup copies of the JSON file.
        self.top.recorders[0].num_backups = 0
        self.top.run()

        cds = CaseDataset(self.filename_json, 'json')
        data = cds.data.fetch()  # results
        # Convert the recorded cases to CSV, then read them back.
        caseset_query_to_csv(data, self.filename_csv)

        cases = [case for case in CSVCaseIterator(filename=self.filename_csv)]
Example #14
0
    def configure(self):
        """Build the airline-allocation model: a linear subproblem feeding
        an iterative branch-and-bound loop with an LP solver, followed by
        fleet analysis, recording all cases to 'airline_allocation.json'."""
        self.add('iter', IterateUntil())
        self.add('airline_subproblem', AirlineSubProblem())
        self.add('branchbound_algorithm', BranchBoundLinear())
        #self.add('solver', LPSolver())
        self.add('solver', LinProgSolver())
        self.add('fleet_analysis', FleetAnalysis())

        #iteration hierarchy
        self.driver.workflow.add(
            ['airline_subproblem', 'iter', 'fleet_analysis'])
        self.iter.workflow.add(['branchbound_algorithm', 'solver'])

        #data connections

        # Connect Airline Allocation SubProblem Component with Branch  and Bound Algorithm Component and the solver
        self.connect('airline_subproblem.f_int',
                     ['branchbound_algorithm.f_int', 'solver.f_int'])
        self.connect('airline_subproblem.f_con',
                     ['branchbound_algorithm.f_con', 'solver.f_con'])
        self.connect('airline_subproblem.A_init',
                     'branchbound_algorithm.A_init')
        self.connect('airline_subproblem.b_init',
                     'branchbound_algorithm.b_init')
        self.connect('airline_subproblem.Aeq',
                     ['branchbound_algorithm.Aeq', 'solver.A_eq'])
        self.connect('airline_subproblem.beq',
                     ['branchbound_algorithm.beq', 'solver.b_eq'])
        self.connect('airline_subproblem.lb_init',
                     'branchbound_algorithm.lb_init')
        self.connect('airline_subproblem.ub_init',
                     'branchbound_algorithm.ub_init')

        # Connect Branch  and Bound Algorithm Component with the solver component
        self.connect('branchbound_algorithm.A', 'solver.A')
        self.connect('branchbound_algorithm.b', 'solver.b')
        self.connect('branchbound_algorithm.lb', 'solver.lb')
        self.connect('branchbound_algorithm.ub', 'solver.ub')

        # Connect solver component with the Branch  and Bound Algorithm Component (return results)
        self.connect('solver.xopt', 'branchbound_algorithm.xopt_current')
        self.connect('solver.fun_opt',
                     'branchbound_algorithm.relaxed_obj_current')
        self.connect('solver.exitflag_LP', 'branchbound_algorithm.exitflag_LP')

        self.connect('branchbound_algorithm.xopt', 'fleet_analysis.xopt')

        # Iterate until the branch-and-bound loop signals completion.
        self.iter.add_stop_condition('branchbound_algorithm.exec_loop != 0')
        self.iter.max_iterations = 1000000

        #data recording
        self.recorders = [JSONCaseRecorder('airline_allocation.json')]
Example #15
0
    def test_multiple_objectives(self):
        """Recording a SensitivityDriver that has two objectives must match
        the 'multiobj.json' baseline."""
        stream = StringIO()
        self.top.add('driver', SensitivityDriver())
        self.top.driver.workflow.add(['comp1', 'comp2'])
        self.top.driver.add_parameter(['comp1.x'], low=-100, high=100)
        self.top.driver.add_objective('comp1.z')
        self.top.driver.add_objective('comp2.z')

        self.top.recorders = [JSONCaseRecorder(stream)]
        self.top.run()

        # Uncomment to regenerate the baseline file:
        # with open('multiobj.new', 'w') as out:
        #     out.write(stream.getvalue())
        self.verify(stream, 'multiobj.json')
Example #16
0
    def test_file_vars(self):
        """Record file-type variables alongside ordinary variables and
        compare against the 'paraboloid.json' baseline."""
        sout = StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]

        # Record problem formulation plus an explicit include list that
        # contains two file variables (writer.file_out, c2.file_in).
        self.top.recording_options.save_problem_formulation = True
        self.top.recording_options.includes = [
            'writer.x', 'writer.y', 'c2.f_xy', 'writer.file_out', 'c2.file_in'
        ]

        self.top.run()

        # Uncomment to regenerate the baseline file:
        # with open('paraboloid.new', 'w') as out:
        #     out.write(sout.getvalue())
        self.verify(sout, 'paraboloid.json')
Example #17
0
    def test_options_with_includes_excludes(self):
        """ verify options with includes and excludes (excludes are processed after includes):
                save_problem_formulation = True
                includes = ['comp1*']
                excludes = ['*directory', '*force_fd', '*missing_deriv_policy']
        """
        sout = StringIO.StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.recording_options.includes = ['comp1*']
        self.top.recording_options.excludes = [
            '*directory', '*force_fd', '*missing_deriv_policy'
        ]
        self.top.run()
        sout.seek(0)  # need to go back to the front of the "file"
        cds = CaseDataset(sout, 'json')

        # Only the comp1 constant that survives the excludes should remain.
        constants = cds.simulation_info['constants'].keys()
        expected = [u'comp1.y']
        self.assertFalse(set(constants) - set(expected))

        vnames = cds.data.var_names().fetch()
        expected = [
            '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
            u'_pseudo_1.out0', u'comp1.derivative_exec_count',
            u'comp1.exec_count', u'comp1.itername', u'comp1.x', u'comp1.z',
            'error_message', 'error_status', 'timestamp'
        ]

        #self.assertFalse(set(vnames) - set(expected))
        self.assertFalse(set(vnames).symmetric_difference(set(expected)))

        # Specific variables are there
        names = ['comp1.z', 'comp1.x']
        vnames = cds.data.vars(names).var_names().fetch()
        self.assertEqual(vnames, names)

        cases = cds.data.vars(names).fetch()
        self.assertEqual(len(cases), 1)
        self.assertEqual(len(cases[0]), len(names))

        # Expected values for the single recorded iteration case.
        iteration_case_1 = {
            "comp1.x": 0.0,
            "comp1.z": 0.0,
        }
        for name, val in zip(names, cases[0]):
            self.assertAlmostEqual(val, iteration_case_1[name])
Example #18
0
    def configure(self):
        """Set up a uniform-random DOE over the Paraboloid, recording all
        cases in both JSON and BSON formats."""
        self.add('paraboloid', Paraboloid())

        self.add('driver', DOEdriver())
        # There are a number of different kinds of DOE available in openmdao.lib.doegenerators
        # self.driver.DOEgenerator = FullFactorial(10) #Full Factorial DOE with 10 levels for each variable
        self.driver.DOEgenerator = Uniform(1000)

        # DOEdriver will automatically record the values of any parameters for each case
        self.driver.add_parameter('paraboloid.x', low=-50, high=50)
        self.driver.add_parameter('paraboloid.y', low=-50, high=50)
        # tell the DOEdriver to also record any other variables you want to know for each case
        self.driver.add_response('paraboloid.f_xy')

        self.recorders = [
            JSONCaseRecorder('doe.json'),
            BSONCaseRecorder('doe.bson')
        ]
Example #19
0
    def test_includes_only(self):
        """ verify options with includes but not problem formulation:
                save_problem_formulation = False
                includes = ['comp2*']
                excludes = []
        """
        sout = StringIO.StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.recording_options.save_problem_formulation = False
        self.top.recording_options.includes = ['comp2*']
        self.top.run()

        sout.seek(0)  # need to go back to the front of the "file"
        cds = CaseDataset(sout, 'json')
        # Only comp2 variables (plus bookkeeping fields) should be recorded.
        vnames = cds.data.var_names().fetch()
        expected = [
            '_driver_id', '_id', '_parent_id', u'comp2.derivative_exec_count',
            u'comp2.exec_count', u'comp2.itername', u'comp2.z',
            'error_message', 'error_status', 'timestamp'
        ]
        self.assertFalse(set(vnames) - set(expected))

        constants = cds.simulation_info['constants'].keys()
        expected = [
            u'comp2.directory', u'comp2.force_fd',
            u'comp2.missing_deriv_policy'
        ]
        self.assertFalse(set(constants) - set(expected))

        # Specific variables.
        names = ['comp2.z']
        vnames = cds.data.vars(names).var_names().fetch()
        self.assertEqual(vnames, names)

        cases = cds.data.vars(names).fetch()
        self.assertEqual(len(cases), 1)
        self.assertEqual(len(cases[0]), len(names))

        # Expected values for the single recorded iteration case.
        iteration_case_1 = {
            "comp2.z": 1.0,
        }
        for name, val in zip(names, cases[0]):
            self.assertAlmostEqual(val, iteration_case_1[name])
    def test_nested(self):
        """Record a three-level nested optimization (an SLSQP driver at each
        level) and compare the JSON output against the 'nested.json'
        baseline."""
        # Innermost assembly: optimizes comp1.y, exposes x and z.
        asm3 = Assembly()
        asm3.add('comp1', ExecComp(exprs=['z=x+y']))
        driver = asm3.add('driver', SLSQPdriver())
        driver.workflow.add('comp1')
        driver.add_parameter('comp1.y', low=-1, high=1, start=0)
        driver.add_objective('comp1.z')
        driver.add_constraint('comp1.z >= 0')
        asm3.create_passthrough('comp1.x')
        asm3.create_passthrough('comp1.z')

        # Middle assembly: wraps asm3 and adds its own optimizer.
        asm2 = Assembly()
        asm2.add('comp1', ExecComp(exprs=['z=x+y']))
        asm2.add('asm3', asm3)
        asm2.connect('comp1.z', 'asm3.x')
        driver = asm2.add('driver', SLSQPdriver())
        driver.workflow.add(('comp1', 'asm3'))
        driver.add_parameter('comp1.y', low=-1, high=1, start=0)
        driver.add_objective('asm3.z')
        driver.add_constraint('comp1.z >= 0')
        asm2.create_passthrough('comp1.x')
        asm2.create_passthrough('asm3.z')

        # Top-level assembly: wraps asm2 and optimizes over the whole stack.
        asm1 = set_as_top(Assembly())
        asm1.add('comp1', ExecComp(exprs=['z=x+y']))
        asm1.add('asm2', asm2)
        asm1.connect('comp1.z', 'asm2.x')
        driver = asm1.add('driver', SLSQPdriver())
        driver.workflow.add(('comp1', 'asm2'))
        driver.add_parameter('comp1.y', low=-1, high=1, start=0)
        driver.add_objective('asm2.z')
        driver.add_constraint('comp1.z >= 0')

        sout = StringIO()
        asm1.recorders = [JSONCaseRecorder(sout)]
        asm1.run()

        # Set REGEN_JSON_FILES in the environment to regenerate the baseline.
        if os.environ.get('REGEN_JSON_FILES'):
            with open('nested.new', 'w') as out:
                out.write(sout.getvalue())
        verify_json(self, sout, 'nested.json')
Example #21
0
    def test_case_recording(self):
        """Run the Sellar problem under the MDF architecture with a JSON
        recorder and verify the recorded variable histories match the known
        solution."""
        cds_path = os.path.join(os.path.dirname(__file__), "cds.json")

        asm = set_as_top(SellarProblem())
        asm.architecture = MDF()
        asm.recorders = [JSONCaseRecorder(cds_path)]
        asm.run()

        cds = CaseDataset(cds_path, 'json')
        data = cds.data.by_variable().fetch()

        # Every solution variable must appear in the recorded data.
        self.assertFalse(set(asm.solution.keys()) - set(data.keys()))

        for var in asm.solution.keys():
            self.assertTrue(data[var])
            # BUG FIX: the np.allclose() result was previously discarded,
            # so a numerical mismatch could never fail the test.
            self.assertTrue(np.allclose(data[var], asm.solution[var]))

        # Release handles before removing the recorded file.
        del cds
        del data

        os.remove(cds_path)
    def test_flatten(self):
        """Record cases containing array and variable-tree data, flatten
        them to CSV, and read the CSV back."""
        # try it after creating some Cases
        # more rigorous checking of the csv

        outputs = ['comp1.a_array', 'comp1.vt']
        inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
        cases = [Case(inputs=inputs, outputs=outputs)]
        # Drive the model with the parameters/responses defined above.
        self.top.driver.clear_parameters()
        Case.set_vartree_inputs(self.top.driver, cases)
        self.top.driver.clear_responses()
        self.top.driver.add_responses(outputs)
        self.top.recorders = [JSONCaseRecorder(self.filename_json)]
        self.top.recorders[0].num_backups = 0
        self.top.run()

        cds = CaseDataset(self.filename_json, 'json')
        data = cds.data.fetch()  # results
        # Flatten the recorded (array/vartree) data into CSV form.
        caseset_query_to_csv(data, self.filename_csv)

        # check recorded cases
        cases = [case for case in CSVCaseIterator(filename=self.filename_csv)]
Example #23
0
    def test_default_options(self):
        """With default recording options everything is recorded: problem
        formulation plus all component variables."""
        # verify default options:
        #        save_problem_formulation = True
        #        includes = ['*']
        #        excludes = []

        sout = StringIO.StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.run()

        sout.seek(0)  # need to go back to the front of the "file"
        cds = CaseDataset(sout, 'json')
        vnames = cds.data.var_names().fetch()
        expected = [
            '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
            u'_pseudo_1.out0', u'comp1.derivative_exec_count',
            u'comp1.exec_count', u'comp1.itername', u'comp1.x', u'comp1.z',
            u'comp2.derivative_exec_count', u'comp2.exec_count',
            u'comp2.itername', u'comp2.z', u'driver.workflow.itername',
            'error_message', 'error_status', 'timestamp'
        ]

        self.assertFalse(set(vnames).symmetric_difference(set(expected)))

        # Specific variables.
        names = ['comp1.x', 'comp2.z', 'comp1.z']
        vnames = cds.data.vars(names).var_names().fetch()
        self.assertEqual(vnames, names)

        cases = cds.data.vars(names).fetch()
        self.assertEqual(len(cases), 1)
        self.assertEqual(len(cases[0]), len(names))

        # Expected values for the single recorded iteration case.
        iteration_case_1 = {
            "comp1.x": 0.0,
            "comp1.z": 0.0,
            "comp2.z": 1.0,
        }
        for name, val in zip(names, cases[0]):
            self.assertAlmostEqual(val, iteration_case_1[name])
Example #24
0
    def test_problem_formulation_only(self):
        """ verify options with no includes:
                save_problem_formulation = True
                includes = []
                excludes = []
        """
        sout = StringIO.StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.recording_options.save_problem_formulation = True
        self.top.recording_options.includes = []
        self.top.run()

        sout.seek(0)  # need to go back to the front of the "file"
        cds = CaseDataset(sout, 'json')
        # Only problem-formulation variables and bookkeeping fields remain.
        vnames = cds.data.var_names().fetch()
        expected = [
            '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
            u'_pseudo_1.out0', u'comp1.x', 'error_message', 'error_status',
            'timestamp'
        ]

        self.assertFalse(set(vnames).symmetric_difference(set(expected)))

        # Specific variables.
        names = [
            'comp1.x',
        ]
        vnames = cds.data.vars(names).var_names().fetch()
        self.assertFalse(set(vnames).symmetric_difference(set(names)))

        cases = cds.data.vars(names).fetch()
        self.assertEqual(len(cases), 1)
        self.assertEqual(len(cases[0]), len(names))

        # Expected values for the single recorded iteration case.
        iteration_case_1 = {
            "comp1.x": 0.0,
        }
        for name, val in zip(names, cases[0]):
            self.assertAlmostEqual(val, iteration_case_1[name])
Example #25
0
    def configure(self):
        """Build a nonlinear branch-and-bound model: an outer iterate-until
        loop wrapping a branch-and-bound component and an SNOPT-based
        nonlinear optimizer, recording all cases to 'nonlintest.json'."""
        self.add('driver', IterateUntil())
        self.add('branchbound_algorithm',
                 BranchBoundNonLinear(n_int=2, n_contin=0))
        #self.add('nonlinopt', BandBSLSQPdriver(n_x=2))
        self.add('nonlinopt', pyOptSparseDriver(n_x=2))
        self.nonlinopt.optimizer = "SNOPT"
        self.add('nonlin_test_prob', NonLinearTestProblem())

        #nonlinear problem formulation
        self.nonlinopt.add_parameter('nonlin_test_prob.x', low=0, high=1e3)

        self.nonlinopt.add_objective('nonlin_test_prob.f')
        self.nonlinopt.add_constraint('nonlin_test_prob.g1 < 0')
        self.nonlinopt.add_constraint('nonlin_test_prob.g2 < 0')

        #iteration hierarchy
        self.driver.workflow.add(['branchbound_algorithm', 'nonlinopt'])
        self.nonlinopt.workflow.add('nonlin_test_prob')

        #data connections
        # Connect solver component with the Branch  and Bound Algorithm Component (return results)
        self.connect('nonlin_test_prob.x',
                     'branchbound_algorithm.xopt_current')
        self.connect('nonlin_test_prob.f',
                     'branchbound_algorithm.relaxed_obj_current')
        self.connect('nonlinopt.exit_flag',
                     'branchbound_algorithm.exitflag_NLP')

        # Connect Airline Allocation SubProblem Component with Branch  and Bound Algorithm Component and the solver
        # Connect Branch  and Bound Algorithm Component with the solver component
        self.connect('branchbound_algorithm.lb', 'nonlinopt.lb')
        self.connect('branchbound_algorithm.ub', 'nonlinopt.ub')

        # Iterate until the branch-and-bound loop signals completion.
        self.driver.add_stop_condition('branchbound_algorithm.exec_loop != 0')
        self.driver.max_iterations = 1000000

        self.recorders = [JSONCaseRecorder('nonlintest.json')]
Example #26
0
    def test_options_with_excludes(self):
        """ verify options with excludes:
                save_problem_formulation = True
                includes = ['*']
                excludes = ['*directory', '*force_fd', '*missing_deriv_policy', '*gradient_options*']
        """
        sout = StringIO.StringIO()
        self.top.recorders = [JSONCaseRecorder(sout)]
        self.top.recording_options.excludes = [
            '*directory', '*force_fd', '*missing_deriv_policy',
            '*gradient_options*'
        ]
        self.top.run()

        sout.seek(0)  # need to go back to the front of the "file"
        cds = CaseDataset(sout, 'json')

        # Only constants that survive the exclude patterns should remain.
        constants = cds.simulation_info['constants'].keys()
        expected = [
            u'recording_options.save_problem_formulation',
            u'recording_options.includes', u'comp1.y',
            u'recording_options.excludes'
        ]
        self.assertFalse(set(constants) - set(expected))
 def test_close(self):
     """After close(), a recorder must not write anything during run()."""
     sout = StringIO()
     self.top.recorders = [JSONCaseRecorder(sout)]
     # Close the recorder before running; nothing should be recorded.
     self.top.recorders[0].close()
     self.top.run()
     self.assertEqual(sout.getvalue(), '')
Example #28
0
        #       Structural Failure in Rotor Spar (ConFail)
        #       Buckling failure of spar (ConFailBuck)
        #       Tensile failure in wire (ConFailWire)
        #
        #    if flags.ConDef:
        #       Constraints on Maximum Deformation (ConDelta)


if __name__ == '__main__':
    import pylab as plt
    from makeplot import plot_single

    from openmdao.lib.casehandlers.api import JSONCaseRecorder

    # Optimize the helicopter problem while recording every case.
    opt = set_as_top(HeliOpt())
    opt.recorders.append(JSONCaseRecorder(out='heli_opt.json'))
    opt.run()

    # for reference, MATLAB solution:
    #    Omega:   1.0512
    #    Ptot:  421.3185
    print 'Parameter:  Omega       =', opt.aso.config.Omega
    print 'Constraint: Weight-Lift =', (opt.aso.Mtot * 9.8 - opt.aso.Ttot)
    print 'Objective:  Ptot        =', opt.aso.Ptot

    # Re-load the recorded cases and plot the final one.
    from openmdao.lib.casehandlers.api import CaseDataset
    dataset = CaseDataset('heli_opt.json', 'json')
    data = dataset.data.by_case().fetch()
    case = data[-1]

    plot_single(case)