def _build_param_dict(self):
    """Register one parameter entry per parametric-sweep combination.

    Builds every combination of the configured parameter options; when no
    combinations exist, a single 'base_options' placeholder entry is added
    instead. Returns None.
    """
    combinations = sweep_funcs.build_options_combinations(self.__params_opts)

    # An empty first combination signals "no parametric options configured".
    if combinations[0]:
        for sweep_opts in combinations:
            # set_single_trial_options returns (options, name); only the
            # generated name is needed to key the parameter entry here.
            label = sweep_funcs.set_single_trial_options(self.__base_options, sweep_opts, 'param')[1]
            self._add_param(label, sweep_opts)
    else:
        self._add_param('base_options', [])

    return None
def _build_trial_dict(self):
    """Register one trial entry per trial-options combination.

    Builds every combination of the configured trial options; when no
    combinations exist, a single 'base_trial' entry using the base options
    is added instead. Returns None.
    """
    combinations = sweep_funcs.build_options_combinations(self.__trials_opts)

    # An empty first combination signals "no trial sweep options configured".
    if combinations[0]:
        for sweep_opts in combinations:
            # set_single_trial_options returns (options, name) for this combination.
            trial_options, label = sweep_funcs.set_single_trial_options(self.__base_options, sweep_opts, 'trial')
            self._add_trial(label, trial_options)
    else:
        self._add_trial('base_trial', self.__base_options)

    return None
def run(self, final_homotopy_step='final', warmstart_file=None, apply_sweeping_warmstart=False, debug_flags=None, debug_locations=None):
    """Build and optimize every trial in the sweep over every parameter setting.

    For each trial, the trial is built once and then optimized once per
    parametric option combination. Results are deep-copied into
    ``self.__sweep_dict`` / ``self.__plot_dict`` and labelled in
    ``self.__sweep_labels``.

    :param final_homotopy_step: homotopy step at which optimization stops
    :param warmstart_file: user-supplied warmstart file, or None
    :param apply_sweeping_warmstart: if True, warmstart each optimization
        from the previously saved successful trial
    :param debug_flags: optional debug flags forwarded to Trial.optimize
    :param debug_locations: optional debug locations forwarded to Trial.optimize
    """
    # Avoid shared mutable default arguments (original used debug_flags=[],
    # debug_locations=[] which are shared across calls).
    debug_flags = [] if debug_flags is None else debug_flags
    debug_locations = [] if debug_locations is None else debug_locations

    awelogger.logger.info('Running sweep (' + self.__name + ') containing ' + str(len(list(self.__trial_dict.keys()))) + ' trials...')

    have_already_saved_prev_trial = False

    # for all trials, run a parametric sweep
    for trial_to_run in list(self.__trial_dict.keys()):

        # build trial once
        single_trial = self.__trial_dict[trial_to_run]
        single_trial.build(False)
        self.__sweep_dict[trial_to_run] = OrderedDict()
        self.__sweep_labels[trial_to_run] = OrderedDict()
        self.__plot_dict[trial_to_run] = OrderedDict()

        # run parametric sweep
        for param in list(self.__param_dict.keys()):

            awelogger.logger.info('Optimize trial (%s) with parametric setting (%s)', trial_to_run, param)

            if param == 'base_options':
                # take the existing trial options for optimizing
                param_options = single_trial.options
            else:
                # add parametric sweep options to trial options and re-build
                param_options = sweep_funcs.set_single_trial_options(single_trial.options, self.__param_dict[param], 'param')[0]
                param_options.build(single_trial.model.architecture)
                self.__trial_dict[trial_to_run].formulation.generate_parameterization_settings(param_options['formulation'])

            # decide whether/where to warmstart this optimization
            warmstart_file, prev_trial_save_name = sweep_funcs.make_warmstarting_decisions(
                self.__name,
                user_defined_warmstarting_file=warmstart_file,
                apply_sweeping_warmstart=apply_sweeping_warmstart,
                have_already_saved_prev_trial=have_already_saved_prev_trial)

            # optimize trial
            single_trial.optimize(options=param_options,
                                  final_homotopy_step=final_homotopy_step,
                                  debug_flags=debug_flags,
                                  debug_locations=debug_locations,
                                  warmstart_file=warmstart_file)

            # save a successful trial (return status < 3) as the warmstart
            # source for subsequent sweep entries
            if apply_sweeping_warmstart and single_trial.return_status_numeric < 3:
                single_trial.save(fn=prev_trial_save_name)
                have_already_saved_prev_trial = True

            recalibrated_plot_dict = sweep_funcs.recalibrate_visualization(single_trial)
            self.__plot_dict[trial_to_run][param] = copy.deepcopy(recalibrated_plot_dict)

            # overwrite outputs to work around pickle bug
            for key in recalibrated_plot_dict['outputs']:
                self.__plot_dict[trial_to_run][param]['outputs'][key] = copy.deepcopy(recalibrated_plot_dict['outputs'][key])

            # save result
            single_trial_solution_dict = single_trial.generate_solution_dict()
            self.__sweep_dict[trial_to_run][param] = copy.deepcopy(single_trial_solution_dict)

            # overwrite outputs to work around pickle bug
            for i in range(len(single_trial_solution_dict['output_vals'])):
                self.__sweep_dict[trial_to_run][param]['output_vals'][i] = copy.deepcopy(single_trial_solution_dict['output_vals'][i])

            self.__sweep_labels[trial_to_run][param] = trial_to_run + '_' + param

    awelogger.logger.info('Sweep (' + self.__name + ') completed.')
# NOTE(review): this appears to be a second, older definition of `run` in the
# same class (the other variant uses awelogger and sweep_funcs warmstarting);
# whichever is defined later shadows the other — confirm and remove one.
def run(self, final_homotopy_step='final', warmstart_file=None, debug_flags=None, debug_locations=None):
    """Build the sweep, then optimize every trial over every parameter setting.

    Recalibrates the visualization manually from the trial's optimization
    results and deep-copies solution data into ``self.__sweep_dict`` /
    ``self.__plot_dict``, labelling each entry in ``self.__sweep_labels``.

    :param final_homotopy_step: homotopy step at which optimization stops
    :param warmstart_file: optional warmstart file forwarded to Trial.optimize
    :param debug_flags: optional debug flags forwarded to Trial.optimize
    :param debug_locations: optional debug locations forwarded to Trial.optimize
    """
    # Avoid shared mutable default arguments (original used debug_flags=[],
    # debug_locations=[] which are shared across calls).
    debug_flags = [] if debug_flags is None else debug_flags
    debug_locations = [] if debug_locations is None else debug_locations

    # build sweep in order to run it
    self.build()

    logging.info('Running sweep (' + self.__name + ') containing ' + str(len(list(self.__trial_dict.keys()))) + ' trials...')

    # for all trials, run a parametric sweep
    for trial_to_run in list(self.__trial_dict.keys()):

        # build trial once
        single_trial = self.__trial_dict[trial_to_run]
        single_trial.build(False)
        self.__sweep_dict[trial_to_run] = OrderedDict()
        self.__sweep_labels[trial_to_run] = OrderedDict()
        self.__plot_dict[trial_to_run] = OrderedDict()

        # run parametric sweep
        for param in list(self.__param_dict.keys()):

            logging.info('Optimize trial (%s) with parametric setting (%s)', trial_to_run, param)

            if param == 'base_options':
                # take the existing trial options for optimizing
                param_options = single_trial.options
            else:
                # add parametric sweep options to trial options and re-build
                param_options = sweep_funcs.set_single_trial_options(single_trial.options, self.__param_dict[param], 'param')[0]
                param_options.build(single_trial.model.architecture)
                self.__trial_dict[trial_to_run].formulation.generate_parameterization_settings(param_options['formulation'])

            # optimize trial
            single_trial.optimize(options=param_options,
                                  final_homotopy_step=final_homotopy_step,
                                  debug_flags=debug_flags,
                                  debug_locations=debug_locations,
                                  warmstart_file=warmstart_file)

            # recalibrate visualization from the fresh optimization results
            V_plot = single_trial.optimization.V_opt
            p_fix_num = single_trial.optimization.p_fix_num
            output_vals = single_trial.optimization.output_vals
            time_grids = single_trial.optimization.time_grids
            integral_outputs_final = single_trial.optimization.integral_outputs_final
            name = single_trial.name
            parametric_options = single_trial.options
            iterations = single_trial.optimization.iterations
            return_status_numeric = single_trial.optimization.return_status_numeric
            timings = single_trial.optimization.timings
            cost_fun = single_trial.nlp.cost_components[0]
            cost = struct_op.evaluate_cost_dict(cost_fun, V_plot, p_fix_num)
            recalibrated_plot_dict = tools.recalibrate_visualization(
                V_plot, single_trial.visualization.plot_dict, output_vals,
                integral_outputs_final, parametric_options, time_grids, cost,
                name, iterations=iterations,
                return_status_numeric=return_status_numeric, timings=timings)
            self.__plot_dict[trial_to_run][param] = copy.deepcopy(recalibrated_plot_dict)

            # overwrite outputs to work around pickle bug
            for key in recalibrated_plot_dict['outputs']:
                self.__plot_dict[trial_to_run][param]['outputs'][key] = copy.deepcopy(recalibrated_plot_dict['outputs'][key])

            # save result
            single_trial_solution_dict = single_trial.generate_solution_dict()
            self.__sweep_dict[trial_to_run][param] = copy.deepcopy(single_trial_solution_dict)

            # overwrite outputs to work around pickle bug
            for i in range(len(single_trial_solution_dict['output_vals'])):
                self.__sweep_dict[trial_to_run][param]['output_vals'][i] = copy.deepcopy(single_trial_solution_dict['output_vals'][i])

            self.__sweep_labels[trial_to_run][param] = trial_to_run + '_' + param

    logging.info('Sweep (' + self.__name + ') completed.')