Example #1
    def setExperiment(self, experiment):
        parameters = Parameter.parametersToDict(experiment.parameters)

        parameters_linearly_spaced_vals = []

        # Build the candidate values for each parameter: linearly spaced from
        # the lower bound to the upper bound, then reversed so the grid is
        # explored starting from the upper bound.
        for idx, parameter in enumerate(experiment.parameters):
            ncpp = self.num_configs_per_param[idx]
            if ncpp == 1:
                # With a single config per parameter, use the lower bound only.
                step_size = 0
            else:
                step_size = ((parameter.maximum - parameter.minimum) /
                             (ncpp - 1))
            parameter_linearly_spaced_vals = [
                parameter.minimum + (i * step_size) for i in range(ncpp)
            ]
            parameter_linearly_spaced_vals = list(
                reversed(parameter_linearly_spaced_vals))
            parameters_linearly_spaced_vals.append(
                parameter_linearly_spaced_vals)

        # get cartesian product of configs
        parameter_configs_product = itertools.product(
            *parameters_linearly_spaced_vals)
        # create collections of ParameterConfigs from config values
        for parameter_config_collection in parameter_configs_product:
            parameter_configs = []
            for parameter, value in zip(experiment.parameters,
                                        parameter_config_collection):
                parameter_configs.append(
                    ParameterConfig(parameter=parameter, value=value))
            self.grid_parameter_configs.append(parameter_configs)
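A minimal, self-contained sketch of the same grid construction, using two hypothetical parameters; the bounds, names, and grid sizes below are invented, and plain dicts and tuples stand in for the Parameter/ParameterConfig objects:

import itertools

# Hypothetical bounds and per-parameter grid sizes (not from the real experiment).
bounds = {'threads': (1, 8), 'block_size': (64, 256)}
num_configs_per_param = [4, 3]

grids = []
for (lo, hi), ncpp in zip(bounds.values(), num_configs_per_param):
    step = 0 if ncpp == 1 else (hi - lo) / (ncpp - 1)
    grids.append([lo + i * step for i in range(ncpp)])

# The Cartesian product yields one value per parameter for every grid point.
for point in itertools.product(*grids):
    print(dict(zip(bounds, point)))  # 4 * 3 = 12 grid points in total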
Example #2
    def _writeScript(self, template, parameter_configs, file_prefix):
        """
        Format the template with the provided parameter configurations and
        save the result locally for reference.
        """
        script = Template(template).safe_substitute(
            ParameterConfig.configsToDict(parameter_configs))
        script_path = (f'{self.templated_scripts_dir}/{file_prefix}_'
                       f'{self.experiment.tool_name}_{int(time.time())}.sh')
        with open(script_path, "w") as f:
            f.write(script)
        return script_path, script
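For reference, a small standalone illustration of how Template.safe_substitute fills placeholders from a config dict; the template string and parameter names here are invented:

from string import Template

# Hypothetical command template; $threads and $block_size are the placeholders.
template = 'run_benchmark --threads $threads --block-size $block_size'
config = {'threads': 4, 'block_size': 128}

# safe_substitute leaves unknown placeholders untouched instead of raising KeyError.
script = Template(template).safe_substitute(config)
print(script)  # run_benchmark --threads 4 --block-size 128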
Example #3
    def _parameterConfigToString(self, parameter_configs):
        """
        Convert parameter configurations into a string used for hashing,
        e.g. {A: 10, B: 100} ==> '#A#10#B#100'
        """
        cur_config = ''
        for key, value in ParameterConfig.configsToDict(
                parameter_configs).items():
            cur_config += f'#{key}#{value}'
        return cur_config
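The same encoding can be reproduced with a plain dict; the two-parameter config below is hypothetical and mirrors the docstring example:

config = {'A': 10, 'B': 100}

# Concatenate '#key#value' pairs, matching the hash string built above.
config_string = ''.join(f'#{key}#{value}' for key, value in config.items())
print(config_string)  # #A#10#B#100

Note that the result depends on iteration order, so configurations must always be built in the same parameter order for the hash to be stable.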
Example #4
    def _configDictToParameterConfigs(self, config_dict):
        """
        Given a dictionary of parameter configurations, keyed by parameter
        name with the parameter value as the value, return a list of
        ParameterConfigs.
        """
        parameter_configs = []
        for name, value in config_dict.items():
            param = self.parameters_by_name.get(name)
            if param is None:
                raise Exception(
                    f'Parameter with name "{name}" not found in optimizer')
            # TODO: This should return ParameterConfig values with the proper type (e.g. int, float)
            parameter_configs.append(
                ParameterConfig(parameter=param, value=value))
        return parameter_configs
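One possible way to address the TODO, sketched under the assumption that each parameter exposes a type name; param_type is a hypothetical attribute, not an existing field:

def _coerceValue(value, param_type):
    """Hypothetical helper: cast a raw config value to the parameter's declared type."""
    if param_type == 'int':
        return int(round(float(value)))
    if param_type == 'float':
        return float(value)
    return value

The call site would then pass _coerceValue(value, param.param_type) instead of the raw value when constructing each ParameterConfig.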
Example #5
    def setExperiment(self, experiment):
        parameters = Parameter.parametersToDict(experiment.parameters)
        ncpp = self.num_configs_per_param
        parameters_linearly_spaced_vals = []
        # Build linearly spaced values from the lower to the upper bound for
        # each parameter (assumes num_configs_per_param > 1).
        for parameter in experiment.parameters:
            step_size = (parameter.maximum - parameter.minimum) / (ncpp - 1)
            parameter_linearly_spaced_vals = [
                parameter.minimum + (i * step_size) for i in range(ncpp)
            ]
            parameters_linearly_spaced_vals.append(
                parameter_linearly_spaced_vals)

        # get cartesian product of configs
        parameter_configs_product = itertools.product(
            *parameters_linearly_spaced_vals)
        # create collections of ParameterConfigs from config values
        for parameter_config_collection in parameter_configs_product:
            parameter_configs = []
            for parameter, value in zip(experiment.parameters,
                                        parameter_config_collection):
                parameter_configs.append(
                    ParameterConfig(parameter=parameter, value=value))
            self.grid_parameter_configs.append(parameter_configs)
Example #6
    def run(self, debug=False):
        """
        Run trials provided by the optimizer while saving results.
        """
        if debug:
            parsl.set_stream_logger()
        self._dfk = parsl.load(self.parsl_config)

        logger.info(f'Starting ParslRunner with config\n{self}')

        flag = True  # overall success across all trials
        initialize_flag = True  # run one warm-up trial before the real trials
        result = None
        for idx, parameter_configs in enumerate(self.optimizer):
            try:
                logger.info(
                    f'Writing script with configs {parameter_configs}\n')
                # The setup, command, and finish scripts are all generated by
                # _createScript below.
                setup_script_content, command_script_path, command_script_content, finish_script_content = self._createScript(
                    self.experiment.setup_template_string, self.command,
                    self.experiment.finish_template_string, parameter_configs)

                # Run a single warm-up trial before the real trials.
                if initialize_flag:
                    initialize_flag = False
                    logger.info(
                        f'[Initial trial for warm-up] Starting trial with script at {command_script_path}\n'
                    )
                    runConfig = paropt.runner.RunConfig(
                        command_script_content=command_script_content,
                        experiment_dict=self.experiment.asdict(),
                        setup_script_content=setup_script_content,
                        finish_script_content=finish_script_content,
                    )
                    # Copy the objective-function parameters and cap the
                    # warm-up trial with a 300 second timeout.
                    initializing_func_param = dict(self.obj_func_params)
                    initializing_func_param['timeout'] = 300
                    result = self.obj_func(runConfig,
                                           **initializing_func_param).result()

                # Run the baseline experiment once, using each parameter's
                # minimum value as its configuration.
                if self.baseline and (self.get_baseline_output is False):
                    self.baseline = False
                    logger.info(f'Creating baseline trial')
                    baseline_parameter_configs = []
                    for parameter in self.baseline_experiment.parameters:
                        baseline_parameter_configs.append(
                            ParameterConfig(parameter=parameter,
                                            value=parameter.minimum))

                    baseline_setup_script_content, baseline_command_script_path, baseline_command_script_content, baseline_finish_script_content = self._createScript(
                        self.experiment.setup_template_string,
                        self.baseline_command,
                        self.experiment.finish_template_string,
                        baseline_parameter_configs)

                    logger.info(
                        f'Starting baseline trial with script at {baseline_command_script_path}\n'
                    )
                    runConfig = paropt.runner.RunConfig(
                        command_script_content=baseline_command_script_content,
                        experiment_dict=self.baseline_experiment.asdict(),
                        setup_script_content=baseline_setup_script_content,
                        finish_script_content=baseline_finish_script_content,
                    )
                    result = None
                    result = self.obj_func(runConfig,
                                           **self.obj_func_params).result()
                    self._validateResult(baseline_parameter_configs, result)
                    result['obj_parameters']['wrt_baseline'] = 1
                    self.baseline_obj_output = result['obj_output']
                    trial = Trial(
                        outcome=result['obj_output'],
                        parameter_configs=baseline_parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters=result['obj_parameters'],
                    )
                    self.storage.saveResult(self.session, trial)
                    self.baseline_time = result['obj_parameters'][
                        'caller_time']
                    self.get_baseline_output = True

                if ('baseline_time' in self.obj_func_params
                        and self.obj_func_params['baseline_time'] is None
                        and self.baseline_time is not None):
                    self.obj_func_params['baseline_time'] = self.baseline_time
                # start normal trials
                logger.info(
                    f'Starting trial with script at {command_script_path}\n')
                runConfig = paropt.runner.RunConfig(
                    command_script_content=command_script_content,
                    experiment_dict=self.experiment.asdict(),
                    setup_script_content=setup_script_content,
                    finish_script_content=finish_script_content,
                )
                result = None
                result = self.obj_func(runConfig,
                                       **self.obj_func_params).result()
                print(result)
                self._validateResult(parameter_configs, result)
                if self.get_baseline_output:
                    result['obj_parameters']['wrt_baseline'] = result[
                        'obj_output'] / self.baseline_obj_output
                trial = Trial(
                    outcome=result['obj_output'],
                    parameter_configs=parameter_configs,
                    run_number=self.run_number,
                    experiment_id=self.experiment.id,
                    obj_parameters=result['obj_parameters'],
                )
                self.storage.saveResult(self.session, trial)
                self.optimizer.register(trial)

                self.run_result[
                    'success'] = True and self.run_result['success']
                flag = flag and self.run_result['success']
                self.run_result['message'][
                    f'experiment {self.experiment.id} run {self.run_number}, config is {ParameterConfig.configsToDict(parameter_configs)}'] = (
                        f'Successfully completed trial {idx} for experiment, output is {result}'
                    )

            except Exception as e:
                err_traceback = traceback.format_exc()
                print(result)
                # timeCommandLimitTime in lib reports a timeout by returning
                # 'Timeout' on stdout.
                if result is not None and result['stdout'] == 'Timeout':
                    if self.get_baseline_output:
                        result['obj_parameters']['wrt_baseline'] = result[
                            'obj_output'] / self.baseline_obj_output
                    trial = Trial(
                        outcome=result['obj_output'],
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters=result['obj_parameters'],
                    )
                    self.optimizer.register(trial)
                    logger.exception('Trial timed out\n')
                    self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx} due to timeout:\nError: {e};\t{err_traceback};\toutput is {result}'
                        )

                else:  # an actual error occurred
                    # Record a large penalty outcome for the failed trial.
                    trial = Trial(
                        outcome=10000000,
                        parameter_configs=parameter_configs,
                        run_number=self.run_number,
                        experiment_id=self.experiment.id,
                        obj_parameters={},
                    )
                    if self.save_fail_trial:
                        self.storage.saveResult(self.session, trial)
                    self.run_result['success'] = False
                    self.run_result['message'][
                        f'experiment {self.experiment.id} run {self.run_number}, config is {parameter_configs}'] = (
                            f'Failed to complete trial {idx}:\nError: {e};\t{err_traceback};\toutput is {result}'
                        )
                    print(err_traceback)
                    print(result)

        logger.info(f'Finished; Run result: {self.run_result}\n')

        # Plot the trial results if requested.
        if self.plot_info['draw_plot']:
            try:
                trials = self.storage.getTrials(self.session,
                                                self.experiment.id)
                trials_dicts = [trial.asdict() for trial in trials]
            except:
                self.session.rollback()
                raise

            logger.info(f'res: {trials_dicts}\n')
            if isinstance(self.optimizer, GridSearch):
                ret = GridSearch_plot(trials_dicts, self.plot_info)
                if ret['success'] == False:
                    logger.info(
                        f'Error when generating plot: {ret["error"]}\n')
                else:
                    logger.info(
                        f'Successfully generated plot {ret["error"]}\n')
            else:
                logger.info(f'Unsupported optimizer type for plotting\n')
        else:
            logger.info(f'Skipping plot generation\n')
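As a rough orientation aid, the core of the loop above reduces to the skeleton below; warm-up, baseline handling, and error handling are omitted, and optimizer, obj_func, build_run_config, storage, and session are placeholders for the paropt objects used in the real method:

def run_trials(optimizer, obj_func, build_run_config, storage, session):
    # Simplified sketch: one objective evaluation per parameter configuration
    # proposed by the optimizer.
    for parameter_configs in optimizer:
        run_config = build_run_config(parameter_configs)  # templated scripts + RunConfig
        result = obj_func(run_config).result()            # execute the trial
        trial = {
            'outcome': result['obj_output'],
            'parameter_configs': parameter_configs,
            'obj_parameters': result['obj_parameters'],
        }
        storage.saveResult(session, trial)  # persist the result
        optimizer.register(trial)           # feed the outcome back to the optimizer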