def test_recording_remote_voi(self):
        # Create a parallel model
        model = Group()

        model.add_subsystem('par', ParallelGroup())
        model.par.add_subsystem('G1', Mygroup())
        model.par.add_subsystem('G2', Mygroup())
        model.connect('par.G1.y', 'Obj.y1')
        model.connect('par.G2.y', 'Obj.y2')

        model.add_subsystem('Obj', ExecComp('obj=y1+y2'))
        model.add_objective('Obj.obj')

        # Configure driver to record VOIs on both procs
        driver = ScipyOptimizeDriver(disp=False)

        driver.recording_options['record_desvars'] = True
        driver.recording_options['record_responses'] = True
        driver.recording_options['record_objectives'] = True
        driver.recording_options['record_constraints'] = True
        driver.recording_options['includes'] = ['par.G1.y', 'par.G2.y']

        driver.add_recorder(self.recorder)

        # Create problem and run driver
        prob = Problem(model, driver)
        prob.add_recorder(self.recorder)
        prob.setup()

        t0, t1 = run_driver(prob)
        prob.record_iteration('final')
        t2 = time()

        prob.cleanup()

        # Since the test compares the last case recorded, just check the
        # current values in the problem. The next section gathers those values.

        # These involve collective gathers so all ranks need to run this
        expected_outputs = driver.get_design_var_values()
        expected_outputs.update(driver.get_objective_values())
        expected_outputs.update(driver.get_constraint_values())

        # includes for outputs are specified as promoted names but we need absolute names
        prom2abs = model._var_allprocs_prom2abs_list['output']
        abs_includes = [prom2abs[n][0] for n in prob.driver.recording_options['includes']]

        # Absolute path names of includes on this rank
        rrank = model.comm.rank
        rowned = model._owning_rank
        local_includes = [n for n in abs_includes if rrank == rowned[n]]

        # Get values for all vars on this rank
        inputs, outputs, residuals = model.get_nonlinear_vectors()

        # Get values for includes on this rank
        local_vars = {n: outputs[n] for n in local_includes}

        # Gather values for includes on all ranks
        all_vars = model.comm.gather(local_vars, root=0)

        if prob.comm.rank == 0:
            # Only rank 0 has all the values. all_vars is a list of dicts,
            # one per rank (in this case, just ranks 0 and 1).
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)

            expected_includes = {
                'par.G1.Cy.y': dct['par.G1.Cy.y'],
                'par.G2.Cy.y': dct['par.G2.Cy.y'],
            }

            expected_outputs.update(expected_includes)

            coordinate = [0, 'ScipyOptimize_SLSQP', (driver.iter_count-1,)]

            expected_data = ((coordinate, (t0, t1), expected_outputs, None),)
            assertDriverIterDataRecorded(self, expected_data, self.eps)

            expected_data = (('final', (t1, t2), expected_outputs),)
            assertProblemDataRecorded(self, expected_data, self.eps)
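
# A hedged sketch, not part of the original test: in OpenMDAO's test suite a
# parallel recording test like the one above typically lives in a test class
# that requests two MPI processes, roughly:
#
#   class DistributedRecorderTest(unittest.TestCase):
#
#       N_PROCS = 2  # tells an MPI-aware test runner (e.g. testflo) to use 2 processes
#
#       def test_recording_remote_voi(self):
#           ...
#
# The class name here is hypothetical; the point is that par.G1 and par.G2 end
# up on different ranks, so the owning-rank filter and comm.gather above are
# actually exercised.
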
Example #3
    # model.add_design_var('eta_sa', lower = 0.01, upper=1)
    model.add_design_var('M_ps', lower=0.01, upper=100)
    model.add_constraint('con1')
    model.add_objective('t_tot')

    # Case recorder setup
    recorder = SqliteRecorder('test.sql')
    p.driver.add_recorder(recorder)
    p.add_recorder(recorder)

    # Set up and run
    p.setup()
    p.set_solver_print(2)
    p.run_driver()
    # view_model(p)
    p.record_iteration('final')
    p.cleanup()

    cr = CaseReader('test.sql')
    drivercases = cr.list_cases('driver')
    case = cr.get_case(drivercases[0])
    print(sorted(case.outputs.keys()))

    print('P_req', p['P_req'])
    print('A_sa', p['A_sa'])
    print('M_u', p['M_u'])
    print('M_ps', p['M_ps'])
    print('t_tot', p['t_tot'])
    print('cycles', p['cycles'])
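
    # A hedged follow-up sketch, not in the original snippet: the values above
    # could also be read back from the recorded 'final' problem case via the
    # CaseReader created earlier, e.g.
    #
    #   final_case = cr.get_case('final')   # case written by p.record_iteration('final')
    #   print(sorted(final_case.outputs.keys()))
    #   print('t_tot (recorded)', final_case['t_tot'])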