def test_distrib_record_driver(self):
    size = 100  # how many items in the array

    prob = Problem()
    prob.model.add_subsystem('des_vars', IndepVarComp('x', np.ones(size)),
                             promotes=['x'])
    prob.model.add_subsystem('plus', DistributedAdder(size), promotes=['x', 'y'])
    prob.model.add_subsystem('summer', Summer(size), promotes=['y', 'sum'])

    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['record_objectives'] = True
    prob.driver.recording_options['record_constraints'] = True
    prob.driver.recording_options['includes'] = ['y']
    prob.driver.add_recorder(self.recorder)

    prob.model.add_design_var('x')
    prob.model.add_objective('sum')

    prob.setup()

    prob['x'] = np.ones(size)

    t0, t1 = run_driver(prob)

    prob.cleanup()

    coordinate = [0, 'Driver', (0, )]

    expected_desvars = {
        "des_vars.x": prob['des_vars.x'],
    }

    expected_objectives = {
        "summer.sum": prob['summer.sum'],
    }

    expected_outputs = expected_desvars.copy()
    expected_outputs['plus.y'] = prob.get_val('plus.y', get_remote=True)

    if prob.comm.rank == 0:
        expected_outputs.update(expected_objectives)

        expected_data = ((coordinate, (t0, t1), expected_outputs, None), )
        assertDriverIterDataRecorded(self, expected_data, self.eps)
def test_distrib_record_driver(self):
    size = 100  # how many items in the array

    prob = Problem()
    prob.model = Group()
    prob.model.add_subsystem('des_vars', IndepVarComp('x', np.ones(size)),
                             promotes=['x'])
    prob.model.add_subsystem('plus', DistributedAdder(size), promotes=['x', 'y'])
    prob.model.add_subsystem('summer', Summer(size), promotes=['y', 'sum'])

    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['record_responses'] = True
    prob.driver.recording_options['record_objectives'] = True
    prob.driver.recording_options['record_constraints'] = True
    prob.driver.recording_options['includes'] = []
    prob.driver.add_recorder(self.recorder)

    prob.model.add_design_var('x')
    prob.model.add_objective('sum')

    prob.setup(check=False)

    prob['x'] = np.ones(size)

    t0, t1 = run_driver(prob)

    prob.cleanup()

    if prob.comm.rank == 0:
        coordinate = [0, 'Driver', (0,)]

        expected_desvars = {
            "des_vars.x": prob['des_vars.x'],
        }

        expected_objectives = {
            "summer.sum": prob['summer.sum'],
        }

        expected_outputs = expected_desvars
        expected_outputs.update(expected_objectives)

        expected_data = ((coordinate, (t0, t1), expected_outputs, None),)
        assertDriverIterDataRecorded(self, expected_data, self.eps)
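# The two tests above rely on DistributedAdder and Summer helper components that are
# defined elsewhere in the test suite and not shown here. As a rough illustration only,
# a distributed adder along these lines could be written with the older 2.x-style API,
# where the component marks itself distributed and splits the array across ranks with
# evenly_distrib_idxs. This is a hedged sketch, not the actual test-suite implementation.
import numpy as np

from openmdao.api import ExplicitComponent
from openmdao.utils.array_utils import evenly_distrib_idxs


class SimpleDistributedAdder(ExplicitComponent):
    """Hypothetical stand-in for DistributedAdder: y = x + 10 on a distributed array."""

    def __init__(self, size):
        super().__init__()
        self.size = size

    def setup(self):
        # mark the whole component as distributed (older-style option)
        self.options['distributed'] = True

        comm = self.comm
        rank = comm.rank

        # split the full array as evenly as possible across the ranks
        sizes, offsets = evenly_distrib_idxs(comm.size, self.size)
        start = offsets[rank]
        end = start + sizes[rank]

        self.add_input('x', val=np.zeros(sizes[rank], float),
                       src_indices=np.arange(start, end, dtype=int))
        self.add_output('y', val=np.zeros(sizes[rank], float))

    def compute(self, inputs, outputs):
        outputs['y'] = inputs['x'] + 10.0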
def test_recording_remote_voi(self):
    # Create a parallel model
    model = Group()

    model.add_subsystem('par', ParallelGroup())
    model.par.add_subsystem('G1', Mygroup())
    model.par.add_subsystem('G2', Mygroup())
    model.connect('par.G1.y', 'Obj.y1')
    model.connect('par.G2.y', 'Obj.y2')

    model.add_subsystem('Obj', ExecComp('obj=y1+y2'))
    model.add_objective('Obj.obj')

    # Configure driver to record VOIs on both procs
    driver = ScipyOptimizeDriver(disp=False)

    driver.recording_options['record_desvars'] = True
    driver.recording_options['record_responses'] = True
    driver.recording_options['record_objectives'] = True
    driver.recording_options['record_constraints'] = True
    driver.recording_options['includes'] = ['par.G1.y', 'par.G2.y']

    driver.add_recorder(self.recorder)

    # Create problem and run driver
    prob = Problem(model, driver)
    prob.setup()

    t0, t1 = run_driver(prob)

    prob.cleanup()

    # Since the test will compare the last case recorded, just check the
    # current values in the problem. This next section is about getting those values.
    # These involve collective gathers, so all ranks need to run this.
    expected_outputs = prob.driver.get_design_var_values()
    expected_outputs.update(prob.driver.get_objective_values())
    expected_outputs.update(prob.driver.get_constraint_values())

    # includes for outputs are specified as promoted names but we need absolute names
    prom2abs = model._var_allprocs_prom2abs_list['output']
    abs_includes = [prom2abs[n][0] for n in prob.driver.recording_options['includes']]

    # Absolute path names of includes on this rank
    rrank = model.comm.rank
    rowned = model._owning_rank
    local_includes = [n for n in abs_includes if rrank == rowned[n]]

    # Get values for all vars on this rank
    inputs, outputs, residuals = model.get_nonlinear_vectors()

    # Get values for includes on this rank
    local_vars = {n: outputs[n] for n in local_includes}

    # Gather values for includes on all ranks
    all_vars = model.comm.gather(local_vars, root=0)

    if prob.comm.rank == 0:
        # Only on rank 0 do we have all the values. The all_vars variable is a list of
        # dicts from all ranks 0, 1, ... In this case, just ranks 0 and 1.
        dct = all_vars[-1]
        for d in all_vars[:-1]:
            dct.update(d)

        expected_includes = {
            'par.G1.Cy.y': dct['par.G1.Cy.y'],
            'par.G2.Cy.y': dct['par.G2.Cy.y'],
        }

        expected_outputs.update(expected_includes)

        coordinate = [0, 'ScipyOptimize_SLSQP', (driver.iter_count - 1, )]

        expected_data = ((coordinate, (t0, t1), expected_outputs, None), )
        assertDriverIterDataRecorded(self, expected_data, self.eps)
def test_recording_remote_voi(self):
    # Create a parallel model
    model = Group()

    model.add_subsystem('par', ParallelGroup())
    model.par.add_subsystem('G1', Mygroup())
    model.par.add_subsystem('G2', Mygroup())
    model.connect('par.G1.y', 'Obj.y1')
    model.connect('par.G2.y', 'Obj.y2')

    model.add_subsystem('Obj', ExecComp('obj=y1+y2'))
    model.add_objective('Obj.obj')

    # Configure driver to record VOIs on both procs
    driver = ScipyOptimizeDriver(disp=False)

    driver.recording_options['record_desvars'] = True
    driver.recording_options['record_responses'] = True
    driver.recording_options['record_objectives'] = True
    driver.recording_options['record_constraints'] = True
    driver.recording_options['includes'] = ['par.G1.y', 'par.G2.y']

    driver.add_recorder(self.recorder)

    # Create problem and run driver
    prob = Problem(model, driver)
    prob.add_recorder(self.recorder)
    prob.setup()

    t0, t1 = run_driver(prob)

    prob.record_iteration('final')
    t2 = time()

    prob.cleanup()

    # Since the test will compare the last case recorded, just check the
    # current values in the problem. This next section is about getting those values.
    # These involve collective gathers, so all ranks need to run this.
    expected_outputs = driver.get_design_var_values()
    expected_outputs.update(driver.get_objective_values())
    expected_outputs.update(driver.get_constraint_values())

    # includes for outputs are specified as promoted names but we need absolute names
    prom2abs = model._var_allprocs_prom2abs_list['output']
    abs_includes = [prom2abs[n][0] for n in prob.driver.recording_options['includes']]

    # Absolute path names of includes on this rank
    rrank = model.comm.rank
    rowned = model._owning_rank
    local_includes = [n for n in abs_includes if rrank == rowned[n]]

    # Get values for all vars on this rank
    inputs, outputs, residuals = model.get_nonlinear_vectors()

    # Get values for includes on this rank
    local_vars = {n: outputs[n] for n in local_includes}

    # Gather values for includes on all ranks
    all_vars = model.comm.gather(local_vars, root=0)

    if prob.comm.rank == 0:
        # Only on rank 0 do we have all the values. The all_vars variable is a list of
        # dicts from all ranks 0, 1, ... In this case, just ranks 0 and 1.
        dct = all_vars[-1]
        for d in all_vars[:-1]:
            dct.update(d)

        expected_includes = {
            'par.G1.Cy.y': dct['par.G1.Cy.y'],
            'par.G2.Cy.y': dct['par.G2.Cy.y'],
        }

        expected_outputs.update(expected_includes)

        coordinate = [0, 'ScipyOptimize_SLSQP', (driver.iter_count - 1,)]

        expected_data = ((coordinate, (t0, t1), expected_outputs, None),)
        assertDriverIterDataRecorded(self, expected_data, self.eps)

        expected_data = (('final', (t1, t2), expected_outputs),)
        assertProblemDataRecorded(self, expected_data, self.eps)
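# Outside of the assert helpers used above, the recorded SQLite file can also be
# inspected directly with OpenMDAO's case-reading API. A minimal sketch, assuming a
# reasonably recent OpenMDAO where CaseReader, list_cases, and get_case are available,
# and assuming the recorder above wrote to a file named 'cases.sql' (the filename here
# is an assumption, not taken from the test).
from openmdao.api import CaseReader

cr = CaseReader('cases.sql')

# driver iterations recorded via driver.add_recorder(...)
driver_case_ids = cr.list_cases('driver')
last_case = cr.get_case(driver_case_ids[-1])
print(last_case.outputs['par.G1.Cy.y'])

# the case recorded by prob.record_iteration('final')
problem_case_ids = cr.list_cases('problem')
final_case = cr.get_case(problem_case_ids[-1])
print(sorted(final_case.outputs))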
def test_recording_remote_voi(self):
    prob = Problem()

    prob.model.add_subsystem('par', ParallelGroup())
    prob.model.par.add_subsystem('G1', Mygroup())
    prob.model.par.add_subsystem('G2', Mygroup())

    prob.model.add_subsystem('Obj', ExecComp('obj=y1+y2'))
    prob.model.connect('par.G1.y', 'Obj.y1')
    prob.model.connect('par.G2.y', 'Obj.y2')

    prob.model.add_objective('Obj.obj')

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'

    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['record_responses'] = True
    prob.driver.recording_options['record_objectives'] = True
    prob.driver.recording_options['record_constraints'] = True
    prob.driver.recording_options['includes'] = ['par.G1.Cy.y', 'par.G2.Cy.y']

    prob.driver.add_recorder(self.recorder)

    prob.setup()

    t0, t1 = run_driver(prob)

    prob.cleanup()

    # Since the test will compare the last case recorded, just check the
    # current values in the problem. This next section is about getting those values.
    # These involve collective gathers, so all ranks need to run this.
    expected_outputs = prob.driver.get_design_var_values()
    expected_outputs.update(prob.driver.get_objective_values())
    expected_outputs.update(prob.driver.get_constraint_values())

    # Determine the expected values for the sysincludes.
    # This gets all of the outputs, but just locally.
    rrank = prob.comm.rank  # root (aka model) rank
    rowned = prob.model._owning_rank

    # names of sysincl vars on this rank
    local_inclnames = [n for n in prob.driver.recording_options['includes']
                       if rrank == rowned[n]]

    # Get values for vars on this rank
    inputs, outputs, residuals = prob.model.get_nonlinear_vectors()

    # Potential local sysvars are in this
    sysvars = outputs._views

    # Just get the values for the sysincl vars on this rank
    local_vars = {c: sysvars[c] for c in local_inclnames}

    # Gather up the values for all the sysincl vars on all ranks
    all_vars = prob.model.comm.gather(local_vars, root=0)

    if prob.comm.rank == 0:
        # Only on rank 0 do we have all the values. The all_vars variable is a list of
        # dicts from all ranks 0, 1, ... In this case, just ranks 0 and 1.
        dct = all_vars[-1]
        for d in all_vars[:-1]:
            dct.update(d)

        expected_includes = {
            'par.G1.Cy.y': dct['par.G1.Cy.y'],
            'par.G2.Cy.y': dct['par.G2.Cy.y'],
        }

        expected_outputs.update(expected_includes)

        coordinate = [0, 'SLSQP', (48, )]

        expected_data = ((coordinate, (t0, t1), expected_outputs, None), )
        assertDriverIterDataRecorded(self, expected_data, self.eps)
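# The three test_recording_remote_voi variants above all assume a small Mygroup helper
# defined elsewhere in the test module. Its exact contents are not shown here; from how
# it is used (a promoted design variable, an output 'y' produced by a subsystem named
# 'Cy', and a constraint that get_constraint_values() can pick up), a hypothetical group
# along these lines would fit. This is a sketch of the assumed shape, not the actual helper.
from openmdao.api import Group, IndepVarComp, ExecComp


class MygroupSketch(Group):
    """Hypothetical stand-in for the Mygroup helper used in the tests above."""

    def setup(self):
        self.add_subsystem('indep_var_comp', IndepVarComp('x'), promotes=['*'])
        self.add_subsystem('Cy', ExecComp('y = 2.0 * x'), promotes=['*'])
        self.add_subsystem('Cc', ExecComp('c = x + 2.0'), promotes=['*'])

        self.add_design_var('x')
        self.add_constraint('c', lower=-3.0)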