Code example #1
    def run_session(self):
        params_set = dict(self.planner_config[self.planner].items())
        manipulator = self._load_search_space(params_set)
        interface = DefaultMeasurementInterface(args=self.args,
                                                manipulator=manipulator)
        # Using OpenTuner API
        # https://github.com/jansel/opentuner/blob/master/examples/py_api/api_example.py
        api = TuningRunManager(interface, self.args)

        self.n_trial = 0  # Reset the trial counter to zero for each planner
        start_time = timer()
        params = {'planner': self.planner, 'start_time': start_time}
        if (self.MAX_RUNTIME != 'None'):
            params['end_time'] = timer() + self.MAX_RUNTIME
            print('\n')
            rospy.loginfo('Executing %s on %s for %d secs', self.MODE,
                          params['planner'], self.MAX_RUNTIME)
        else:
            print('\n')
            rospy.loginfo('Executing %s on %s for %d trials', self.MODE,
                          params['planner'], self.MAX_TRIALS)

        for _ in range(self.MAX_TRIALS):
            desired_result = api.get_next_desired_result()
            params_set = desired_result.configuration.data
            params['params_set'] = params_set

            run_result = self._opentuner_obj(params)
            result = Result(time=run_result['loss'])
            api.report_result(desired_result, result)
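
The snippet above ends inside the trial loop. A hedged sketch of how such a run is typically closed with the same OpenTuner API (the logging line is illustrative, not from the original source):

        # Hypothetical continuation after the trial loop (not in the original source):
        best_cfg = api.get_best_configuration()  # best configuration found so far
        api.finish()                             # close the OpenTuner tuning run
        rospy.loginfo('Best %s configuration: %s', self.planner, best_cfg)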
Code example #2
 def __init__(self, *ot_args, **ot_kwargs):
     """
     Creates communication queues and spawns a thread
     to run the tuning logic.
     """
     super(OpenTunerDriver, self).__init__()
     self._best_config = None
     interface = CtreeMeasurementInterface(self, *ot_args, **ot_kwargs)
     arg_parser = argparse.ArgumentParser(parents=opentuner.argparsers())
     config_args = CONFIG.get("opentuner", "args").split()
     tuner_args = arg_parser.parse_args(config_args)
     self.manager = TuningRunManager(interface, tuner_args)
     self._converged = False
Code example #3
def main():
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    api = TuningRunManager(interface, args)
    for x in range(500):
        desired_result = api.get_next_desired_result()
        if desired_result is None:
            # The search space for this example is very small, so sometimes
            # the techniques have trouble finding a config that hasn't already
            # been tested.  Change this to a continue to make it try again.
            break
        cfg = desired_result.configuration.data
        result = Result(time=test_func(cfg))
        api.report_result(desired_result, result)

    best_cfg = api.get_best_configuration()
    api.finish()
    print('best x found was', best_cfg['x'])
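
Both this example and example #5 call test_func(cfg) without showing it. A minimal placeholder consistent with the Result(time=...) minimization above, assuming a simple quadratic objective in 'x', could be:

def test_func(cfg):
    # Hypothetical objective: smaller is better, minimum at x = 10.
    x = cfg['x']
    return float((x - 10) ** 2)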
Code example #4
    def __init__(self, train_queries, test_queries, experiment_config,
                 result_dir, system_environment):
        """
        Args:
            train_queries (list of SQLQuery): training queries
            test_queries (list of SQLQuery): test queries
            experiment_config (dict): experiment configuration
            result_dir (str): directory for result files
            system_environment (PostgresSystemEnvironment): encapsulates the environment
        """
        self.logger = logging.getLogger(__name__)
        self.logger.info('Setting up OpenTuner...')

        self.train_queries = train_queries
        self.test_queries = test_queries
        self.experiment_config = experiment_config

        self.max_size = experiment_config['max_size']
        self.size_weight = experiment_config['size_weight']
        self.max_runtime = experiment_config['max_runtime']
        self.runtime_weight = experiment_config['runtime_weight']
        self.reward_penalty = experiment_config['reward_penalty']

        self.system_environment = system_environment

        self.result_dir = result_dir

        # 2nd search space
        self.n_idxs = experiment_config['n_queries_per_episode']
        self.n_cols_per_idx = 3  # TODO hardcode

        self.tbls = experiment_config['tables']
        self.n_idxs_per_tbl = int(self.n_idxs / len(self.tbls))

        # maps tbls to mapping of tbl's column indices to tbl's columns, per search space representation
        self.tbl_2_col_idx_2_col = {}
        for tbl in self.tbls:
            self.tbl_2_col_idx_2_col[tbl] = {}
            for col_idx, col in enumerate(tpch_table_columns[tbl].keys()):
                self.tbl_2_col_idx_2_col[tbl][col_idx + 1] = col  # + 1 because 0 is the noop

        # api
        sys.argv = [sys.argv[0]]  # opentuner expects own args
        parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        args = parser.parse_args()
        manipulator = self.build_search_space()
        interface = DefaultMeasurementInterface(args=args,
                                                manipulator=manipulator)
        self.api = TuningRunManager(interface, args)
Code example #5
File: api_example.py Project: charleslo/opentuner
def main():
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    api = TuningRunManager(interface, args)
    for x in range(500):
        desired_result = api.get_next_desired_result()
        if desired_result is None:
            # The search space for this example is very small, so sometimes
            # the techniques have trouble finding a config that hasn't already
            # been tested.  Change this to a continue to make it try again.
            break
        cfg = desired_result.configuration.data
        result = Result(time=test_func(cfg))
        api.report_result(desired_result, result)

    best_cfg = api.get_best_configuration()
    api.finish()
    print('best x found was', best_cfg['x'])
Code example #6
def create_benchmark_api(benchmark):
    args = argparser.parse_args()
    args.benchmark = benchmark
    args.database = "{}_{}{}".format(
        os.path.splitext(args.database)[0], benchmark,
        os.path.splitext(args.database)[1])
    interface = ReproBLASTuner(args)
    api = TuningRunManager(interface, args)
    return api
Code example #7
class OpenTunerDriver(TuningDriver):
    """
    Object that interacts with backend tuners. Provides
    a stream of configurations, as well as an interface
    to report on the performance of each.
    """
    def __init__(self, *ot_args, **ot_kwargs):
        """
        Creates communication queues and spawns a thread
        to run the tuning logic.
        """
        super(OpenTunerDriver, self).__init__()
        self._best_config = None
        interface = CtreeMeasurementInterface(self, *ot_args, **ot_kwargs)
        arg_parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        config_args = CONFIG.get("opentuner", "args").split()
        tuner_args = arg_parser.parse_args(config_args)
        self.manager = TuningRunManager(interface, tuner_args)
        self._converged = False

    def _get_configs(self):
        """Get the next configuration to test."""
        timeout = CONFIG.getint("opentuner", "timeout")
        while True:
            self.curr_desired_result = self.manager.get_next_desired_result()
            if self.curr_desired_result is None:
                break
            yield self.curr_desired_result.configuration.data
            print("Best configuration", self.manager.get_best_configuration())

        log.info("exhausted stream of configurations.")
        best_config = self.manager.get_best_configuration()
        assert best_config is not None, "No best configuration reported."
        self._converged = True
        while True:
            yield best_config

    def report(self, **kwargs):
        """Report the performance of the most recent configuration."""
        if not self._converged:
            print("Tuning run result:",
                  self.curr_desired_result.configuration.data, kwargs)
            self.manager.report_result(self.curr_desired_result,
                                       Result(**kwargs))
Code example #8
File: driver.py Project: i-Zaak/ctree
class OpenTunerDriver(TuningDriver):
    """
    Object that interacts with backend tuners. Provides
    a stream of configurations, as well as an interface
    to report on the performance of each.
    """
    def __init__(self, *ot_args, **ot_kwargs):
        """
        Creates communication queues and spawns a thread
        to run the tuning logic.
        """
        super(OpenTunerDriver, self).__init__()
        self._best_config = None
        interface = CtreeMeasurementInterface(self, *ot_args, **ot_kwargs)
        arg_parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        config_args = CONFIG.get("opentuner", "args").split()
        tuner_args = arg_parser.parse_args(config_args)
        self.manager = TuningRunManager(interface, tuner_args)
        self._converged = False

    def _get_configs(self):
        """Get the next configuration to test."""
        timeout = CONFIG.getint("opentuner", "timeout")
        while True:
            self.curr_desired_result = self.manager.get_next_desired_result()
            if self.curr_desired_result is None:
                break
            yield self.curr_desired_result.configuration.data
            print("Best configuration", self.manager.get_best_configuration())

        log.info("exhausted stream of configurations.")
        best_config = self.manager.get_best_configuration()
        assert best_config is not None, "No best configuration reported."
        self._converged = True
        while True:
            yield best_config

    def report(self, **kwargs):
        """Report the performance of the most recent configuration."""
        if not self._converged:
            print("Tuning run result:", self.curr_desired_result.configuration.data, kwargs)
            self.manager.report_result(self.curr_desired_result, Result(**kwargs))
Code example #9
def create_test_tuning_run(db):
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    args.database = db
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    api = TuningRunManager(interface, args)
    return api
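
create_test_tuning_run only constructs the manager; the caller still drives the request/report cycle. A hedged usage sketch, with the database path and the squared-error objective purely illustrative:

api = create_test_tuning_run('test_tuning_run.db')
for _ in range(50):
    dr = api.get_next_desired_result()
    if dr is None:
        break
    x = dr.configuration.data['x']
    api.report_result(dr, Result(time=float(x * x)))
print(api.get_best_configuration())
api.finish()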
Code example #10
File: driver.py Project: i-Zaak/ctree
 def __init__(self, *ot_args, **ot_kwargs):
     """
     Creates communication queues and spawns a thread
     to run the tuning logic.
     """
     super(OpenTunerDriver, self).__init__()
     self._best_config = None
     interface = CtreeMeasurementInterface(self, *ot_args, **ot_kwargs)
     arg_parser = argparse.ArgumentParser(parents=opentuner.argparsers())
     config_args = CONFIG.get("opentuner", "args").split()
     tuner_args = arg_parser.parse_args(config_args)
     self.manager = TuningRunManager(interface, tuner_args)
     self._converged = False
Code example #11
File: python_template.py Project: LWetz/Final
import argparse

# Imports for the OpenTuner objects used below (module paths as in the
# upstream OpenTuner api_example; adjust if the library layout differs).
import opentuner
from opentuner.api import TuningRunManager
from opentuner.measurement.interface import DefaultMeasurementInterface
from opentuner.resultsdb.models import Result
from opentuner.search.manipulator import ConfigurationManipulator

def get_next_desired_result():
    global desired_result
    desired_result = api.get_next_desired_result()
    while desired_result is None:
        desired_result = api.get_next_desired_result()
    return desired_result.configuration.data

def report_result(runtime):
    api.report_result(desired_result, Result(time=runtime))

def finish():
    api.finish()

parser = argparse.ArgumentParser(parents=opentuner.argparsers())
args = parser.parse_args()

manipulator = ConfigurationManipulator()
:::parameters:::
interface = DefaultMeasurementInterface(args=args,
                                        manipulator=manipulator,
                                        project_name='atf_library',
                                        program_name='atf_library',
                                        program_version='0.1')


api = TuningRunManager(interface, args)
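
The three helper functions above wrap the manager in a procedural interface. A hedged sketch of how generated code might call them; evaluate() is a hypothetical measurement step, not part of the template:

for _ in range(100):
    cfg = get_next_desired_result()   # next configuration chosen by OpenTuner
    runtime = evaluate(cfg)           # hypothetical: measure the program under this configuration
    report_result(runtime)
finish()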

Code example #12
File: tuner.py Project: phrb/legup-tuner
def tuning_loop():
    report_delay = 30
    last_time    = time.time()
    start_time   = last_time
    iterations   = 5
    parser       = argparse.ArgumentParser(parents = opentuner.argparsers())

    parser.add_argument("--processes",
                        type = int,
                        help = "Number of Python threads available.")
    parser.add_argument("--no-wait",
                        action = "store_true",
                        help   = "Do not wait for requested results to generate more requests.")
    parser.add_argument("--application",
                        type = str,
                        help = "Application name.")
    parser.add_argument("--verilog-file",
                        type = str,
                        help = "Verilog file for the application.")

    args         = parser.parse_args()
    pool         = ThreadPool(args.processes)
    manipulator  = ConfigurationManipulator()

    global application
    global verilog_file
    global application_path
    global container_path
    global host_path
    global image_name
    global script_name

    global tuning_init

    application      = args.application
    verilog_file     = args.verilog_file
    application_path = "/root/legup_src/legup-4.0/examples/chstone/{0}".format(application)
    container_path   = "/root/legup_src/legup-4.0/examples/chstone/{0}/tuner".format(application)
    host_path        = "/home/bruelp/legup-tuner/post_place_and_route/py"
    image_name       = "legup_quartus"
    script_name      = "measure.sh"

    print(application, container_path, application_path)

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))

    interface = DefaultMeasurementInterface(args            = args,
                                            manipulator     = manipulator,
                                            project_name    = 'HLS-FPGAs',
                                            program_name    = 'legup-tuner',
                                            program_version = '0.0.1')

    manager = TuningRunManager(interface, args)

    current_time      = time.time()
    computing_results = []
    computed_results  = []
    desired_results   = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([desired_result,
                                              pool.apply_async(get_wallclock_time,
                                                              (desired_result.configuration.data, ))])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time = cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs    = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout = None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(dr,
                                          Result(time = result['value'],
                                                 cycles = result['cycles'],
                                                 fmax = result['fmax'],
                                                 LU = result['lu'],
                                                 pins = result['pins'],
                                                 regs = result['regs'],
                                                 block = result['block'],
                                                 ram = result['ram'],
                                                 dsp = result['dsp']))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()
Code example #13
class OpentunerOptimizer(AbstractOptimizer):
    primary_import = "opentuner"

    def __init__(self,
                 api_config,
                 techniques=DEFAULT_TECHNIQUES,
                 n_suggestions=1):
        """Build wrapper class to use opentuner optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.

        techniques : iterable of strings
            A list or tuple of techniques to use in opentuner. If the list
            has only one technique, then that technique will be used. If the
            list has multiple techniques a bandit over those techniques
            will be used.

        n_suggestions : int
            Default number of suggestions to be made in parallel.
        """
        AbstractOptimizer.__init__(self, api_config)

        # Opentuner requires DesiredResult to reference suggestion when making
        # its observation. x_to_dr maps the dict suggestion to DesiredResult.
        self.x_to_dr = {}
        # Keep last suggested x and repeat it whenever opentuner gives up.
        self.dummy_suggest = None
        """Setting up the arguments for opentuner. You can see all possible
        arguments using:
        ```
        >>> import opentuner
        >>> opentuner.default_argparser().parse_args(['-h'])
        ```
        We only change a few arguments (other arguments are set to defaults):
        * database = MEMORY_ONLY_DB: to use an in-memory sqlite database
        * parallelism = n_suggestions: num of suggestions to give in parallel
        * technique = techniques: a list of techniques to be used by opentuner
        * print_params = False: to prevent opentuner from exiting after printing
            param spaces
        """
        args = Namespace(
            bail_threshold=500,
            database=MEMORY_ONLY_DB,
            display_frequency=10,
            generate_bandit_technique=False,
            label=None,
            list_techniques=False,
            machine_class=None,
            no_dups=False,
            parallel_compile=False,
            parallelism=n_suggestions,
            pipelining=0,
            print_params=False,
            print_search_space_size=False,
            quiet=False,
            results_log=None,
            results_log_details=None,
            seed_configuration=[],
            stop_after=None,
            technique=techniques,
            test_limit=5000,
        )

        # Setup some dummy classes required by opentuner to actually run.
        manipulator = OpentunerOptimizer.build_manipulator(api_config)
        interface = DMI(args=args, manipulator=manipulator)
        self.api = TuningRunManager(interface, args)

    @staticmethod
    def hashable_dict(d):
        """A custom function for hashing dictionaries.

        Parameters
        ----------
        d : dict or dict-like
            The dictionary to be converted to immutable/hashable type.

        Returns
        -------
        hashable_object : frozenset of tuple pairs
            Bijective equivalent to dict that can be hashed.
        """
        hashable_object = frozenset(d.items())
        return hashable_object

    @staticmethod
    def build_manipulator(api_config):
        """Build a ConfigurationManipulator object to be used by opentuner.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.

        Returns
        -------
        manipulator : ConfigurationManipulator
            Some over complexified class required by opentuner to run.
        """
        manipulator = ConfigurationManipulator()

        for pname in api_config:
            ptype = api_config[pname]["type"]
            pspace = api_config[pname].get("space", None)
            pmin, pmax = api_config[pname].get("range", (None, None))

            if ptype == "real":
                if pspace in ("linear", "logit"):
                    ot_param = FloatParameter(pname, pmin, pmax)
                elif pspace in ("log", "bilog"):
                    LogFloatParameter_ = ClippedParam(LogFloatParameter)
                    ot_param = LogFloatParameter_(pname, pmin, pmax)
                else:
                    assert False, "unsupported param space = %s" % pspace
            elif ptype == "int":
                if pspace in ("linear", "logit"):
                    ot_param = IntegerParameter(pname, pmin, pmax)
                elif pspace in ("log", "bilog"):
                    ot_param = LogIntegerParameter(pname, pmin, pmax)
                else:
                    assert False, "unsupported param space = %s" % pspace
            elif ptype == "bool":
                # The actual bool parameter seems not to work in Py3 :(
                ot_param = IntegerParameter(pname, 0, 1)
            elif ptype in ("cat", "ordinal"):
                # Treat ordinal and categorical variables the same for now.
                assert "values" in api_config[pname]
                pvalues = api_config[pname]["values"]
                ot_param = EnumParameter(pname, pvalues)
            else:
                assert False, "type=%s/space=%s not handled in opentuner yet" % (
                    ptype, pspace)
            manipulator.add_parameter(ot_param)
        return manipulator

    def suggest(self, n_suggestions=1):
        """Make `n_suggestions` suggestions for what to evaluate next.

        This requires the user observe all previous suggestions before calling
        again.

        Parameters
        ----------
        n_suggestions : int
            The number of suggestions to return.

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """
        assert n_suggestions >= 1, "invalid value for n_suggestions"

        # Update the n_suggestions if it is different from the current setting.
        if self.api.search_driver.args.parallelism != n_suggestions:
            self.api.search_driver.args.parallelism = n_suggestions
            warnings.warn("n_suggestions changed across suggest calls")

        # Require the user to already observe all previous suggestions.
        # Otherwise, opentuner will just recycle old suggestions.
        assert len(
            self.x_to_dr
        ) == 0, "all the previous suggestions should have been observed by now"

        # The real meat of suggest from opentuner: Get next `n_suggestions`
        # unique suggestions.
        desired_results = [
            self.api.get_next_desired_result() for _ in range(n_suggestions)
        ]

        # Save DesiredResult object in dict since observe will need it.
        X = []
        using_dummy_suggest = False
        for ii in range(n_suggestions):
            # Opentuner can give up, but the API requires guessing forever.
            if desired_results[ii] is None:
                assert self.dummy_suggest is not None, "opentuner gave up on the first call!"
                # Use the dummy suggestion in this case.
                X.append(self.dummy_suggest)
                using_dummy_suggest = True
                continue

            # Get the simple dict equivalent to suggestion.
            x_guess = desired_results[ii].configuration.data
            X.append(x_guess)

            # Now save the desired result for future use in observe.
            x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)
            assert x_guess_ not in self.x_to_dr, "the suggestions should not already be in the x_to_dr dict"
            self.x_to_dr[x_guess_] = desired_results[ii]
            # This will also catch None from opentuner.
            assert isinstance(self.x_to_dr[x_guess_], DesiredResult)

        assert len(
            X
        ) == n_suggestions, "incorrect number of suggestions provided by opentuner"
        # Log suggestion for repeating if opentuner gives up next time. We can
        # only do this when it is not already being used since we will be
        # checking guesses against dummy_suggest in observe.
        if not using_dummy_suggest:
            self.dummy_suggest = X[-1]
        return X

    def observe(self, X, y):
        """Feed the observations back to opentuner.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated.
        """
        assert len(X) == len(y)

        for x_guess, y_ in zip(X, y):
            x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)

            # If we can't find the dr object then it must be the dummy guess.
            if x_guess_ not in self.x_to_dr:
                assert x_guess == self.dummy_suggest, "Appears to be guess that did not originate from suggest"
                continue

            # Get the corresponding DesiredResult object.
            dr = self.x_to_dr.pop(x_guess_, None)
            # This will also catch None from opentuner.
            assert isinstance(
                dr,
                DesiredResult), "DesiredResult object not available in x_to_dr"

            # Opentuner's arg names assume we are minimizing execution time.
            # So, if we want to minimize we have to pretend y is a 'time'.
            result = Result(time=y_)
            self.api.report_result(dr, result)
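
The wrapper is driven through its suggest/observe pair. A hedged driver sketch using only the methods shown above; the api_config contents and the objective function are illustrative assumptions:

opt = OpentunerOptimizer(api_config={'x': {'type': 'real', 'space': 'linear', 'range': (-5.0, 5.0)}})

def objective(params):
    # Hypothetical function to minimize.
    return (params['x'] - 1.0) ** 2

for _ in range(10):
    X = opt.suggest(n_suggestions=2)
    y = [objective(x) for x in X]
    opt.observe(X, y)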
Code example #14
 def solve(self, job):
     logging.debug('Starting opentuner')
     failed_jobs_threshold = self.jobs_limit * _FAILED_JOBS_COEF
     manipulator = ConfigurationManipulator()
     for var in job.optimization_job.task_variables:
         if var.HasField('continuous_variable'):
             cont_var = var.continuous_variable
             param = FloatParameter(var.name, cont_var.l, cont_var.r)
         else:
             int_var = var.integer_variable
             param = IntegerParameter(var.name, int_var.l, int_var.r)
         manipulator.add_parameter(param)
     parser = argparse.ArgumentParser(parents=opentuner.argparsers())
     args = parser.parse_args([])
     args.parallelism = 4
     args.no_dups = True
     interface = DefaultMeasurementInterface(args=args,
                                             manipulator=manipulator,
                                             project_name=job.job_id)
     api = TuningRunManager(interface, args)
     jobs = []
     current_value = None
     failed_jobs = 0
     while failed_jobs < failed_jobs_threshold and not self._check_for_termination(
             job):
         remaining_jobs = []
         for job_id, desired_result in jobs:
             res = self._get_evaluation_job_result(job_id)
             if res is not None:
                 if current_value is None or current_value > res + _THRESHOLD_EPS:
                     failed_jobs = 0
                 else:
                     failed_jobs += 1
                 result = Result(time=res)
                 api.report_result(desired_result, result)
             else:
                 remaining_jobs.append((job_id, desired_result))
         jobs = remaining_jobs
         while len(jobs) < self.jobs_limit:
             desired_result = api.get_next_desired_result()
             if desired_result is None:
                 break
             job_id = self._start_evaluation_job(
                 job, desired_result.configuration.data)
             if job_id is None:
                 api.report_result(desired_result, Result(time=math.inf))
             else:
                 jobs.append((job_id, desired_result))
         if not jobs:
             break
         r = api.get_best_result()
         if r is not None:
             current_value = r.time
             logging.debug('Opentuner current state: %s %s', r.time,
                           api.get_best_configuration())
         time.sleep(5)
     res = api.get_best_result().time
     vars = api.get_best_configuration()
     api.finish()
     return res, vars
Code example #15
File: tuner.py Project: phrb/legup-tuner
def tuning_loop():
    report_delay = 200
    last_time    = time.time()
    start_time   = last_time
    parser       = argparse.ArgumentParser(parents = opentuner.argparsers())

    parser.add_argument("--processes",
                        type = int,
                        help = "Number of Python threads available.")
    parser.add_argument("--no-wait",
                        action = "store_true",
                        help   = "Do not wait for requested results to generate more requests.")

    args         = parser.parse_args()
    pool         = ThreadPool(args.processes)
    manipulator  = ConfigurationManipulator()

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))

    interface = DefaultMeasurementInterface(args            = args,
                                            manipulator     = manipulator,
                                            project_name    = 'HLS-FPGAs',
                                            program_name    = 'legup-tuner',
                                            program_version = '0.0.1')

    manager = TuningRunManager(interface, args)

    current_time      = time.time()
    computing_results = []
    computed_results  = []
    desired_results   = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([desired_result,
                                              pool.apply_async(get_wallclock_time,
                                                              (desired_result.configuration.data, ))])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time = cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs    = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout = None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time = result))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()
Code example #16
def tuning_loop():
    report_delay = 5
    last_time = time.time()
    start_time = last_time
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())

    parser.add_argument("--processes",
                        type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait",
        action="store_true",
        help="Do not wait for requested results to generate more requests.")

    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))

    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')

    manager = TuningRunManager(interface, args)

    current_time = time.time()
    computing_results = []
    computed_results = []
    desired_results = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time,
                                         cfgs).get(timeout=None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time=result))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()
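
get_wallclock_time, log_intermediate, and save_final_configuration are project helpers not shown in this variant. Minimal hedged placeholders, assuming a scalar wall-clock cost per configuration (run_legup_flow is a hypothetical build-and-measure step):

def get_wallclock_time(cfg):
    # Hypothetical: build and run the design for this configuration, return elapsed seconds.
    start = time.time()
    run_legup_flow(cfg)   # placeholder for the actual build-and-measure step
    return time.time() - start

def log_intermediate(elapsed_seconds, manager):
    print(elapsed_seconds, manager.get_best_configuration())

def save_final_configuration(cfg):
    with open('best_configuration.txt', 'w') as f:
        f.write(str(cfg))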
Code example #17
class PostgresTuner():
    """
    An OpenTuner interface for Postgres indexing

    OpenTuner API usage:
    - define a parameter search space for parameters to tune. 
    - define a run loop, in which:
        - OpenTuner returns a configuration, a particular assignment of parameters in search space
        - we execute and evaluate that configuration on the system
        - and return the reward to OpenTuner, refining the search  
    
    Search space:

    1st:
        - one parameter per action taken for a candidate index column (up to 3 index columns per index), per index (one index per query)
        - size: (up to) 3 candidate index columns per query * 100 episodes * 20 queries per episode = 6000

    2nd:
        - one parameter per action taken for a candidate index column, per allowed index / indexing decision in an episode
        - size: 3 * 20
        - idea:
            - analogous to how the RL agent works: exposed to 20 queries per episode,
            - and evaluated on a random subset of the training queries


    Sourced from: 
        - lift/case_studies/{mongodb,mysql}/ 
        - https://github.com/jansel/opentuner/blob/master/examples/py_api/api_example.py
    
    """
    def __init__(self, train_queries, test_queries, experiment_config,
                 result_dir, system_environment):
        """
        Args:
            train_queries (list of SQLQuery): training queries
            test_queries (list of SQLQuery): test queries
            experiment_config (dict): experiment configuration
            result_dir (str): directory for result files
            system_environment (PostgresSystemEnvironment): encapsulates the environment
        """
        self.logger = logging.getLogger(__name__)
        self.logger.info('Setting up OpenTuner...')

        self.train_queries = train_queries
        self.test_queries = test_queries
        self.experiment_config = experiment_config

        self.max_size = experiment_config['max_size']
        self.size_weight = experiment_config['size_weight']
        self.max_runtime = experiment_config['max_runtime']
        self.runtime_weight = experiment_config['runtime_weight']
        self.reward_penalty = experiment_config['reward_penalty']

        self.system_environment = system_environment

        self.result_dir = result_dir

        # 2nd search space
        self.n_idxs = experiment_config['n_queries_per_episode']
        self.n_cols_per_idx = 3  # TODO hardcode

        self.tbls = experiment_config['tables']
        self.n_idxs_per_tbl = int(self.n_idxs / len(self.tbls))

        # maps tbls to mapping of tbl's column indices to tbl's columns, per search space representation
        self.tbl_2_col_idx_2_col = {}
        for tbl in self.tbls:
            self.tbl_2_col_idx_2_col[tbl] = {}
            for col_idx, col in enumerate(tpch_table_columns[tbl].keys()):
                self.tbl_2_col_idx_2_col[tbl][col_idx + 1] = col  # + 1 because 0 is the noop

        # api
        sys.argv = [sys.argv[0]]  # opentuner expects own args
        parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        args = parser.parse_args()
        manipulator = self.build_search_space()
        interface = DefaultMeasurementInterface(args=args,
                                                manipulator=manipulator)
        self.api = TuningRunManager(interface, args)

    def build_search_space(self):
        """
        Set up search space via ConfigurationManipulator, which is responsible for choosing configurations across runs,
        where a configuration is a particular assignment of the parameters. 

        Tightly coupled with agent/system representations used in src.
        
        Repeating from above:
        *initial* search space 
        - parameter per action (candidate index column in index, where candidate index column is one of the query columns) 
          per query
        - see commit 8f94c4b

        *updated* search space
        - parameter per action (same, but here candidate index column is not one of query columns but any of columns in query table)
          per allowed index / indexing decision

        So, at the risk of redundancy, an "action" refers to the particular assignment of a parameter, which is
        an integer indicating either a noop or the column chosen for a candidate index column.
         
        """

        self.logger.info('Building OpenTuner search space...')

        # maps each candidate index (which the system operates on) to its candidate index columns (which OpenTuner operates on)
        self.idx_2_idx_cols = {}
        self.idx_2_tbl = {}

        manipulator = ConfigurationManipulator()

        for tbl in self.tbls:
            n_cols_per_tbl = len(tpch_table_columns[tbl])

            for candidate_idx in range(self.n_idxs_per_tbl):
                idx_id = "tbl_{}_idx_{}".format(tbl, candidate_idx)
                idx_col_ids = []

                for candidate_idx_col in range(self.n_cols_per_idx):
                    idx_col_id = idx_id + "_idx_col_{}".format(
                        candidate_idx_col)
                    idx_col_ids.append(idx_col_id)

                    manipulator.add_parameter(
                        IntegerParameter(idx_col_id, 0, n_cols_per_tbl))

                self.idx_2_idx_cols[idx_id] = idx_col_ids
                self.idx_2_tbl[idx_id] = tbl

        self.logger.info("... actions are: {}".format(self.idx_2_idx_cols))
        return manipulator

    def run(self, n_iterations):
        """
        Runs OpenTuner search

        run loop - https://github.com/jansel/opentuner/blob/master/examples/py_api/api_example.py
        "desired result" vs "result" - http://groups.csail.mit.edu/commit/papers/2014/ansel-pact14-opentuner.pdf
        """
        self.logger.info('Running OpenTuner...')
        # search
        for i in range(n_iterations):
            self.logger.info('iteration {}/{}...'.format(i + 1, n_iterations))
            start = time.time()

            desired_result = self.api.get_next_desired_result()
            configuration = desired_result.configuration.data

            reward, _ = self.act(configuration)
            result = opentuner.resultsdb.models.Result(time=-1 * reward)
            self.api.report_result(desired_result, result)

            self.logger.info('...received reward {} after {} seconds'.format(
                reward,
                time.time() - start))
        # best from search
        best = self.api.get_best_configuration()
        self.eval_best(best)
        self.system_environment.reset()

    def act(self, cfg):
        """
        Get reward for current configuration i.e. recommended index.

        Args:
            cfg: current configuration returned by OpenTuner search, maps parameter to parameter value
        """
        self.system_environment.reset()
        context = []
        episode_reward = 0

        # for each candidate index, extract decisions for candidate index columns
        for idx_id, idx_col_ids in self.idx_2_idx_cols.items():

            # get integer (i.e. integer value of IntegerParameter)
            actions = []
            for idx_col_id in idx_col_ids:
                action = cfg[idx_col_id]
                actions.append(action)

            # get corresponding attribute for integer
            system_action = []
            tbl = self.idx_2_tbl[idx_id]
            for action in actions:
                if action != 0:  # noop
                    system_action.append(self.tbl_2_col_idx_2_col[tbl][action])

            #
            # execute
            #
            self.system_environment.act(dict(index=system_action, table=tbl))
            context.append(system_action)

        #
        # evaluate on a randomly selected subset of workload
        #
        train_query_sample = np.random.choice(self.train_queries,
                                              size=max(
                                                  len(self.train_queries) // 10,
                                                  1))  # TODO add to config
        for query in train_query_sample:
            query_string, query_string_args = query.sample_query()
            query_time = self.system_environment.execute(
                query_string, query_string_args)
            index_set_size, index_set = self.system_environment.system_status()

            reward = self.system_to_agent_reward(
                data=dict(runtime=query_time, index_size=index_set_size))

            episode_reward += reward

        # N.B. context can't be used(!)
        return episode_reward, context

    def eval_best(self, config):
        self.logger.info('Evaluating best OpenTuner configuration...')
        # create configuration
        reward, context = self.act(config)

        runs = self.experiment_config['n_executions']

        runtimes = []
        for query in self.test_queries:
            runtimes_per_query = []
            for _ in range(runs):
                query_string, query_string_args = query.sample_query()
                query_time = self.system_environment.execute(
                    query_string, query_string_args)

                runtimes_per_query.append(query_time)
            runtimes.append(runtimes_per_query)

        index_set_size, index_set = self.system_environment.system_status()

        # runtimes
        np.savetxt(os.path.join(self.result_dir, 'test_query_times.txt'),
                   np.asarray(runtimes),
                   delimiter=',')

        # index set, index set size
        with open(os.path.join(self.result_dir, 'test_index_set_stats.csv'),
                  'wb') as f:
            pickle.dump([index_set_size, index_set], f)

    #
    # Auxiliary
    #
    def system_to_agent_reward(self, data):
        """
        Same as in Converter, but didn't want to have Schema / Converter here
        """
        runtime, index_size = data["runtime"], data["index_size"]
        reward = -(self.runtime_weight * runtime) - (self.size_weight *
                                                     index_size)

        if runtime > self.max_runtime:
            reward -= self.reward_penalty
        if index_size > self.max_size:
            reward -= self.reward_penalty
        return reward
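
A hedged sketch of driving this class end to end, using only its own constructor and run() signatures; the 'n_iterations' config key is an assumption:

tuner = PostgresTuner(train_queries, test_queries, experiment_config,
                      result_dir, system_environment)
tuner.run(n_iterations=experiment_config.get('n_iterations', 100))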
Code example #18
    def __init__(self,
                 api_config,
                 techniques=DEFAULT_TECHNIQUES,
                 n_suggestions=1):
        """Build wrapper class to use opentuner optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.

        techniques : iterable of strings
            A list or tuple of techniques to use in opentuner. If the list
            has only one technique, then that technique will be used. If the
            list has multiple techniques a bandit over those techniques
            will be used.

        n_suggestions : int
            Default number of suggestions to be made in parallel.
        """
        AbstractOptimizer.__init__(self, api_config)

        # Opentuner requires DesiredResult to reference suggestion when making
        # its observation. x_to_dr maps the dict suggestion to DesiredResult.
        self.x_to_dr = {}
        # Keep last suggested x and repeat it whenever opentuner gives up.
        self.dummy_suggest = None
        """Setting up the arguments for opentuner. You can see all possible
        arguments using:
        ```
        >>> import opentuner
        >>> opentuner.default_argparser().parse_args(['-h'])
        ```
        We only change a few arguments (other arguments are set to defaults):
        * database = MEMORY_ONLY_DB: to use an in-memory sqlite database
        * parallelism = n_suggestions: num of suggestions to give in parallel
        * technique = techniques: a list of techniques to be used by opentuner
        * print_params = False: to prevent opentuner from exiting after printing
            param spaces
        """
        args = Namespace(
            bail_threshold=500,
            database=MEMORY_ONLY_DB,
            display_frequency=10,
            generate_bandit_technique=False,
            label=None,
            list_techniques=False,
            machine_class=None,
            no_dups=False,
            parallel_compile=False,
            parallelism=n_suggestions,
            pipelining=0,
            print_params=False,
            print_search_space_size=False,
            quiet=False,
            results_log=None,
            results_log_details=None,
            seed_configuration=[],
            stop_after=None,
            technique=techniques,
            test_limit=5000,
        )

        # Setup some dummy classes required by opentuner to actually run.
        manipulator = OpentunerOptimizer.build_manipulator(api_config)
        interface = DMI(args=args, manipulator=manipulator)
        self.api = TuningRunManager(interface, args)
Code example #19
def tuning_loop():
    report_delay = 30
    last_time = time.time()
    start_time = last_time
    iterations = 5
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())

    parser.add_argument("--processes",
                        type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait",
        action="store_true",
        help="Do not wait for requested results to generate more requests.")
    parser.add_argument("--application", type=str, help="Application name.")
    parser.add_argument("--verilog-file",
                        type=str,
                        help="Verilog file for the application.")

    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()

    global application
    global verilog_file
    global application_path
    global container_path
    global host_path
    global image_name
    global script_name

    global tuning_init

    application = args.application
    verilog_file = args.verilog_file
    application_path = "/root/legup_src/legup-4.0/examples/chstone/{0}".format(
        application)
    container_path = "/root/legup_src/legup-4.0/examples/chstone/{0}/tuner".format(
        application)
    host_path = "/home/bruelp/legup-tuner/post_place_and_route/py"
    image_name = "legup_quartus"
    script_name = "measure.sh"

    print(application, container_path, application_path)

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))

    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')

    manager = TuningRunManager(interface, args)

    current_time = time.time()
    computing_results = []
    computed_results = []
    desired_results = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time,
                                         cfgs).get(timeout=None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(
                        dr,
                        Result(time=result['value'],
                               cycles=result['cycles'],
                               fmax=result['fmax'],
                               LU=result['lu'],
                               pins=result['pins'],
                               regs=result['regs'],
                               block=result['block'],
                               ram=result['ram'],
                               dsp=result['dsp']))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()