Example #1
    def _suggestUniqueParameterConfigs(self):
        """Returns an untested list of parameter configs
        This is used for handling integer values for configuration values
        Since the model can only suggest floats, if Parameters are of integer type we don't want to run
        another trial that tests the exact same set of configurations
        This function raises an exception if unable to find a new configuration
        The approach:
            - get a suggested configuration
            - if the set of configurations have NOT been used before return it
            - if the set of configurations have been used before,
                register the point and get another suggestion
        """
        config_dict = self.optimizer.suggest(self.utility)
        param_configs = self._configDictToParameterConfigs(config_dict)
        trial = self._getTrialWithParameterConfigs(
            param_configs
        )  # trial is None if no existing trial with same parameters
        n_suggests = 0
        while trial is not None and n_suggests < MAX_RETRY_SUGGEST:
            # This set of configurations has been used before:
            # register a new trial with the same outcome but with our suggested (float) values

            self.using_budget_flag = False  # this registration does not count against the budget

            dup_trial = Trial(
                parameter_configs=param_configs,
                outcome=trial.outcome,
                run_number=trial.run_number,
                experiment_id=trial.experiment_id,
                obj_parameters=trial.obj_parameters,
            )

            try:
                self.register(dup_trial)  # register this trial
            except Exception:
                pass  # a failed duplicate registration is non-fatal; keep retrying

            # get another suggestion from updated model, and check
            config_dict = self.optimizer.suggest(self.utility)
            param_configs = self._configDictToParameterConfigs(config_dict)
            trial = self._getTrialWithParameterConfigs(param_configs)
            n_suggests += 1

        if trial is not None:
            logger.warning(f'Reached the maximum number of retries ({MAX_RETRY_SUGGEST})\n')
            raise Exception(
                f"BayesOpt failed to find an untested config after {n_suggests} attempts. Consider increasing the utility function's kappa value.\n"
            )
        self.using_budget_flag = True
        return param_configs
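
The retry loop above can be read in isolation: keep asking the model for a suggestion, and whenever the (rounded) configuration has been seen before, register the known outcome under the new float point so the model moves elsewhere. A minimal sketch of that idea, where the optimizer object, seen_outcomes map, and round_config callable are hypothetical stand-ins rather than paropt's API:

MAX_RETRY_SUGGEST = 10

def suggest_unique(optimizer, seen_outcomes, round_config):
    """Keep asking the optimizer until the rounded config is unseen."""
    for _ in range(MAX_RETRY_SUGGEST):
        raw = optimizer.suggest()   # float-valued suggestion
        config = round_config(raw)  # snap floats to the integer grid
        key = tuple(sorted(config.items()))
        if key not in seen_outcomes:
            return config
        # Duplicate: feed the known outcome back so the model moves on.
        optimizer.register(raw, seen_outcomes[key])
    raise RuntimeError(
        f"No untested config found after {MAX_RETRY_SUGGEST} attempts; "
        "consider increasing the acquisition function's kappa value")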
Example #2
  def run(self, debug=False):
    """
    Run trials provided by the optimizer while saving results.
    """
    if debug:
      parsl.set_stream_logger()
    self._dfk = parsl.load(self.parsl_config)
    logger.info(f'Starting ParslRunner with config\n{self}')
    try:
      for parameter_configs in self.optimizer:
        logger.info(f'Writing script with configs {parameter_configs}')
        command_script_path, command_script_content = self._writeScript(self.command, parameter_configs, 'command')
        if self.experiment.setup_template_string is not None:
          _, setup_script_content = self._writeScript(self.experiment.setup_template_string, parameter_configs, 'setup')
        else:
          setup_script_content = None
        if self.experiment.finish_template_string is not None:
          _, finish_script_content = self._writeScript(self.experiment.finish_template_string, parameter_configs, 'finish')
        else:
          finish_script_content = None

        logger.info(f'Starting trial with script at {command_script_path}')
        runConfig = paropt.runner.RunConfig(
          command_script_content=command_script_content,
          experiment_dict=self.experiment.asdict(),
          setup_script_content=setup_script_content,
          finish_script_content=finish_script_content,
        )
        result = self.parsl_app(runConfig).result()
        self._validateResult(parameter_configs, result)
        trial = Trial(
          outcome=result['run_time'],
          parameter_configs=parameter_configs,
          run_number=self.run_number,
          experiment_id=self.experiment.id,
        )
        self.storage.saveResult(self.session, trial)
        self.optimizer.register(trial)
      
      self.run_result['success'] = True
      self.run_result['message'] = (f'Successfully completed trials for '
                                    f'experiment {self.experiment.id} run {self.run_number}')
    except Exception as e:
      err_traceback = traceback.format_exc()
      self.run_result['success'] = False
      self.run_result['message'] = (f'Failed to complete trials, experiment {self.experiment.id} '
                                    f'run {self.run_number}:\nError: {e}\n{err_traceback}')
      logger.exception(err_traceback)
    logger.info(f'Finished; Run result: {self.run_result}')
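
The loop above only relies on the optimizer being iterable (yielding one set of parameter configs per trial) and accepting finished trials through register(). A hedged sketch of that contract with illustrative names (ListOptimizer and evaluate are not part of paropt):

class ListOptimizer:
    """Yields each config once; register() records finished trials."""

    def __init__(self, configs):
        self._configs = list(configs)
        self.history = []

    def __iter__(self):
        return iter(self._configs)

    def register(self, trial):
        self.history.append(trial)

def evaluate(config):
    # stand-in for writing the command script, running it, and timing it
    return 10.0 / config['threads']

opt = ListOptimizer([{'threads': 1}, {'threads': 2}, {'threads': 4}])
for config in opt:
    opt.register({'parameter_configs': config, 'outcome': evaluate(config)})
print(opt.history)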
Example #3
    def _suggestUniqueParameterConfigs(self):
        """Returns an untested list of parameter configs
    This is used for handling integer values for configuration values
    Since the model can only suggest floats, if Parameters are of integer type we don't want to run
    another trial that tests the exact same set of configurations
    This function raises an exception if unable to find a new configuration
    The approach:
      - get a suggested configuration
      - if the set of configurations have NOT been used before return it
      - if the set of configurations have been used before,
        register the point and get another suggestion
    """
        config_dict = self.optimizer.suggest(self.utility)
        param_configs = self._configDictToParameterConfigs(config_dict)
        trial = self._getTrialWithParameterConfigs(param_configs)
        n_suggests = 0
        while trial is not None and n_suggests < MAX_RETRY_SUGGEST:
            logger.info(
                f"Retrying suggest: non-unique set of ParameterConfigs: {param_configs}"
            )
            # This set of configurations has been used before:
            # register a new trial with the same outcome but with our suggested (float) values
            dup_trial = Trial(
                parameter_configs=param_configs,
                outcome=trial.outcome,
                run_number=trial.run_number,
                experiment_id=trial.experiment_id,
            )
            self.register(dup_trial)
            # get another suggestion from updated model
            config_dict = self.optimizer.suggest(self.utility)
            param_configs = self._configDictToParameterConfigs(config_dict)
            trial = self._getTrialWithParameterConfigs(param_configs)
            n_suggests += 1

        if trial is not None:
            raise Exception(
                f"BayesOpt failed to find an untested config after {n_suggests} attempts. "
                f"Consider increasing the utility function's kappa value")
        return param_configs
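
The duplicates this function guards against come from rounding: two distinct float suggestions can collapse onto the same integer configuration. A small illustration (to_int_config is a hypothetical stand-in; the real conversion lives in _configDictToParameterConfigs):

def to_int_config(config_dict):
    # round each float suggestion onto the integer grid
    return {name: int(round(value)) for name, value in config_dict.items()}

a = to_int_config({'workers': 3.2, 'block_size': 15.9})
b = to_int_config({'workers': 2.8, 'block_size': 16.4})
assert a == b == {'workers': 3, 'block_size': 16}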
Example #4
    def run(self, debug=False):
        """
        Run trials provided by the optimizer while saving results.
        """
        if debug:
            parsl.set_stream_logger()
        self._dfk = parsl.load(self.parsl_config)

        logger.info(f'Starting ParslRunner with config\n{self}')

        initialize_flag = True
        result = None
        for idx, parameter_configs in enumerate(self.optimizer):
            try:
                logger.info(f'Writing script with configs {parameter_configs}')
                command_script_path, command_script_content = self._writeScript(
                    self.command, parameter_configs, 'command')
                if self.experiment.setup_template_string is not None:
                    _, setup_script_content = self._writeScript(
                        self.experiment.setup_template_string,
                        parameter_configs, 'setup')
                else:
                    setup_script_content = None
                if self.experiment.finish_template_string is not None:
                    _, finish_script_content = self._writeScript(
                        self.experiment.finish_template_string,
                        parameter_configs, 'finish')
                else:
                    finish_script_content = None
                # run one warm-up trial before the timed trials
                if initialize_flag:
                    initialize_flag = False
                    logger.info(
                        f'Starting warm-up trial with script at {command_script_path}'
                    )
                    runConfig = paropt.runner.RunConfig(
                        command_script_content=command_script_content,
                        experiment_dict=self.experiment.asdict(),
                        setup_script_content=setup_script_content,
                        finish_script_content=finish_script_content,
                    )
                    result = self.obj_func(runConfig,
                                           **self.obj_func_params).result()

                logger.info(
                    f'Starting trial with script at {command_script_path}')
                runConfig = paropt.runner.RunConfig(
                    command_script_content=command_script_content,
                    experiment_dict=self.experiment.asdict(),
                    setup_script_content=setup_script_content,
                    finish_script_content=finish_script_content,
                )
                result = None  # reset so the except block can tell whether this trial produced output
                result = self.obj_func(runConfig,
                                       **self.obj_func_params).result()
                self._validateResult(parameter_configs, result)
                trial = Trial(
                    outcome=result['obj_output'],
                    parameter_configs=parameter_configs,
                    run_number=self.run_number,
                    experiment_id=self.experiment.id,
                    obj_parameters=result['obj_parameters'],
                )
                self.storage.saveResult(self.session, trial)
                self.optimizer.register(trial)
                # run_result['success'] stays True unless a trial fails
                self.run_result['message'][
                    f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                        f'Successfully completed trial {idx} for experiment')

            except Exception as e:
                err_traceback = traceback.format_exc()
                if result is not None and result.get(
                        'stdout') == 'Timeout':  # timeout reported by timeCommandLimitTime in lib
                    trial = Trial(
                        outcome=result['obj_output'],
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters=result['obj_parameters'],
                    )
                    self.optimizer.register(trial)
                    logger.exception('Trial timed out')
                    self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx} due to timeout:\nError: {e}\n{err_traceback}'
                        )

                else:
                    trial = Trial(
                        outcome=10000000,  # large penalty outcome for a failed trial
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters={},
                    )
                    self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx}:\nError: {e}\n{err_traceback}'
                        )

        logger.info(f'Finished; Run result: {self.run_result}')
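
The except branch above encodes a two-tier failure policy: a timeout still carries a usable (capped) measurement that is worth registering with the optimizer, while any other failure is recorded with a large penalty outcome and empty objective parameters. A minimal sketch of that policy, with PENALTY_OUTCOME and outcome_for as illustrative names rather than paropt's API:

PENALTY_OUTCOME = 10_000_000

def outcome_for(result):
    if result is not None and result.get('stdout') == 'Timeout':
        # the objective wrapper reported a timeout with a capped outcome
        return result['obj_output'], result['obj_parameters']
    # hard failure: no measurement, so register a penalty instead
    return PENALTY_OUTCOME, {}

print(outcome_for({'stdout': 'Timeout', 'obj_output': 300.0,
                   'obj_parameters': {'caller_time': 300.0}}))
print(outcome_for(None))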
Example #5
    def run(self, debug=False):
        """
        Run trials provided by the optimizer while saving results.
        """
        if debug:
            parsl.set_stream_logger()
        self._dfk = parsl.load(self.parsl_config)

        logger.info(f'Starting ParslRunner with config\n{self}')

        initialize_flag = True
        result = None
        for idx, parameter_configs in enumerate(self.optimizer):
            try:
                logger.info(
                    f'Writing script with configs {parameter_configs}\n')
                setup_script_content, command_script_path, command_script_content, finish_script_content = self._createScript(
                    self.experiment.setup_template_string, self.command,
                    self.experiment.finish_template_string, parameter_configs)

                # run one warm-up trial before the timed trials
                if initialize_flag:
                    initialize_flag = False
                    logger.info(
                        f'[Initial trial for warm-up] Starting trial with script at {command_script_path}\n'
                    )
                    runConfig = paropt.runner.RunConfig(
                        command_script_content=command_script_content,
                        experiment_dict=self.experiment.asdict(),
                        setup_script_content=setup_script_content,
                        finish_script_content=finish_script_content,
                    )
                    initializing_func_param = dict(self.obj_func_params)
                    initializing_func_param['timeout'] = 300  # fixed timeout for the warm-up trial
                    result = self.obj_func(runConfig,
                                           **initializing_func_param).result()

                # run baseline experiment
                if self.baseline and not self.get_baseline_output:
                    self.baseline = False
                    logger.info('Creating baseline trial')
                    baseline_parameter_configs = []
                    for parameter in self.baseline_experiment.parameters:
                        baseline_parameter_configs.append(
                            ParameterConfig(parameter=parameter,
                                            value=parameter.minimum))

                    baseline_setup_script_content, baseline_command_script_path, baseline_command_script_content, baseline_finish_script_content = self._createScript(
                        self.experiment.setup_template_string,
                        self.baseline_command,
                        self.experiment.finish_template_string,
                        baseline_parameter_configs)

                    logger.info(
                        f'Starting baseline trial with script at {baseline_command_script_path}\n'
                    )
                    runConfig = paropt.runner.RunConfig(
                        command_script_content=baseline_command_script_content,
                        experiment_dict=self.baseline_experiment.asdict(),
                        setup_script_content=baseline_setup_script_content,
                        finish_script_content=baseline_finish_script_content,
                    )
                    result = None
                    result = self.obj_func(runConfig,
                                           **self.obj_func_params).result()
                    self._validateResult(baseline_parameter_configs, result)
                    result['obj_parameters']['wrt_baseline'] = 1
                    self.baseline_obj_output = result['obj_output']
                    trial = Trial(
                        outcome=result['obj_output'],
                        parameter_configs=baseline_parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters=result['obj_parameters'],
                    )
                    self.storage.saveResult(self.session, trial)
                    self.baseline_time = result['obj_parameters'][
                        'caller_time']
                    self.get_baseline_output = True

                if ('baseline_time' in self.obj_func_params
                        and self.obj_func_params['baseline_time'] is None
                        and self.baseline_time is not None):
                    self.obj_func_params['baseline_time'] = self.baseline_time
                # start normal trials
                logger.info(
                    f'Starting trial with script at {command_script_path}\n')
                runConfig = paropt.runner.RunConfig(
                    command_script_content=command_script_content,
                    experiment_dict=self.experiment.asdict(),
                    setup_script_content=setup_script_content,
                    finish_script_content=finish_script_content,
                )
                result = None  # reset so the except block can tell whether this trial produced output
                result = self.obj_func(runConfig,
                                       **self.obj_func_params).result()
                logger.debug(f'Trial result: {result}')
                self._validateResult(parameter_configs, result)
                if self.get_baseline_output:
                    result['obj_parameters']['wrt_baseline'] = result[
                        'obj_output'] / self.baseline_obj_output
                trial = Trial(
                    outcome=result['obj_output'],
                    parameter_configs=parameter_configs,
                    run_number=self.run_number,
                    experiment_id=self.experiment.id,
                    obj_parameters=result['obj_parameters'],
                )
                self.storage.saveResult(self.session, trial)
                self.optimizer.register(trial)

                # run_result['success'] stays True unless a trial fails
                self.run_result['message'][
                    f'experiment {self.experiment.id} run {self.run_number}, config is {ParameterConfig.configsToDict(parameter_configs)}'] = (
                        f'Successfully completed trial {idx} for experiment, output is {result}'
                    )

            except Exception as e:
                err_traceback = traceback.format_exc()
                logger.debug(f'Result at failure: {result}')
                if result is not None and result.get(
                        'stdout') == 'Timeout':  # timeout reported by timeCommandLimitTime in lib
                    if self.get_baseline_output:
                        result['obj_parameters']['wrt_baseline'] = result[
                            'obj_output'] / self.baseline_obj_output
                    trial = Trial(
                        outcome=result['obj_output'],
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters=result['obj_parameters'],
                    )
                    self.optimizer.register(trial)
                    logger.exception('Trial timed out\n')
                    self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx} due to timeout:\nError: {e};\t{err_traceback};\toutput is {result}'
                        )

                else:  # a real error occurred
                    trial = Trial(
                        outcome=10000000,  # large penalty outcome for a failed trial
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters={},
                    )
                    if self.save_fail_trial:
                        self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx}:\nError: {e};\t{err_traceback};\toutput is {result}'
                        )
                    logger.debug(err_traceback)
                    logger.debug(f'Result at failure: {result}')

        logger.info(f'Finished; Run result: {self.run_result}\n')

        # plot part
        if self.plot_info['draw_plot']:
            try:
                trials = self.storage.getTrials(self.session,
                                                self.experiment.id)
                trials_dicts = [trial.asdict() for trial in trials]
            except Exception:
                self.session.rollback()
                raise

            logger.info(f'Retrieved trials: {trials_dicts}\n')
            if isinstance(self.optimizer, GridSearch):
                ret = GridSearch_plot(trials_dicts, self.plot_info)
                if not ret['success']:
                    logger.info(f'Error when generating plot: {ret["error"]}\n')
                else:
                    logger.info('Successfully generated plot\n')
            else:
                logger.info('Unsupported optimizer type for plotting\n')
        else:
            logger.info('Skipping plot generation\n')
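
The wrt_baseline bookkeeping above normalizes each trial's objective against a single baseline run, so a value of 1.0 means "same as the baseline" and, when minimizing runtime, values below 1.0 mean the trial beat the baseline. A sketch with illustrative values (with_baseline_ratio is a hypothetical helper, not paropt's API):

baseline_obj_output = 120.0  # e.g. the baseline runtime in seconds

def with_baseline_ratio(result):
    # annotate a trial result with its objective relative to the baseline
    result['obj_parameters']['wrt_baseline'] = (
        result['obj_output'] / baseline_obj_output)
    return result

print(with_baseline_ratio({'obj_output': 90.0, 'obj_parameters': {}}))
# -> {'obj_output': 90.0, 'obj_parameters': {'wrt_baseline': 0.75}}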