def start(program):
    """Launch an OpenTuner run for the given StreamJit application.

    Looks up the stored configuration for *program* in the local
    streamjit.db SQLite database and prepares the tuner argument
    namespace (per-program results database, fixed test limit).

    Args:
        program: name of the StreamJit application to tune; also the
            lookup key in the ``apps`` table.
    """
    log = logging.getLogger(__name__)
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    parser.add_argument('--program', help='Name of the StreamJit application')
    argv = ['--program', program, '--test-limit', '6000']
    args = parser.parse_args(argv)
    if not args.database:
        # Default to a per-program SQLite results database.
        args.database = 'sqlite:///' + program + '.db'
    conn = None
    try:
        conn = sqlite3.connect('streamjit.db')
        c = conn.cursor()
        # Bug fix: the original interpolated *program* directly into the SQL
        # text, which breaks on quotes and allows SQL injection. Use a bound
        # parameter instead.
        c.execute('SELECT configuration FROM apps WHERE name=?', (program,))
        row = c.fetchone()
        if not row:
            data = raw_input("No entry found with name = %s \nPlease press anykey to exit" % program)
            sys.exit(1)
        cfgString = row[0]
        cfg = configuration.getConfiguration(cfgString)
        cfgparams = cfg.getAllParameters()
    except Exception:
        # Keep the console open so the user can read the traceback.
        print('Exception occured')
        traceback.print_exc()
        data = raw_input("Press Keyboard to exit...")
    finally:
        # The original leaked the connection; close it on every path.
        if conn is not None:
            conn.close()
def solve(self, job):
    """Run an OpenTuner search for the given optimization job.

    Builds a search space from the job's task variables, keeps up to
    ``self.jobs_limit`` evaluation jobs in flight, feeds completed
    results back to OpenTuner, and stops once no improvement has been
    seen for ``jobs_limit * _FAILED_JOBS_COEF`` consecutive results or
    the job is externally terminated.

    Returns:
        Tuple ``(best_time, best_configuration)`` from the tuning run.
    """
    logging.debug('Starting opentuner')
    # Consecutive non-improving results tolerated before giving up.
    failed_jobs_threshold = self.jobs_limit * _FAILED_JOBS_COEF
    manipulator = ConfigurationManipulator()
    # One tuner parameter per task variable; [l, r] bounds come from the job.
    for var in job.optimization_job.task_variables:
        if var.HasField('continuous_variable'):
            cont_var = var.continuous_variable
            param = FloatParameter(var.name, cont_var.l, cont_var.r)
        else:
            int_var = var.integer_variable
            param = IntegerParameter(var.name, int_var.l, int_var.r)
        manipulator.add_parameter(param)
    # Parse OpenTuner's defaults from an empty argv, then override directly.
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args([])
    args.parallelism = 4
    args.no_dups = True
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name=job.job_id)
    api = TuningRunManager(interface, args)
    jobs = []  # in-flight evaluations: (job_id, desired_result) pairs
    current_value = None  # best objective value seen so far
    failed_jobs = 0  # consecutive results without improvement
    while failed_jobs < failed_jobs_threshold and not self._check_for_termination(job):
        # Harvest finished evaluations; keep the rest pending.
        remaining_jobs = []
        for job_id, desired_result in jobs:
            res = self._get_evaluation_job_result(job_id)
            if res is not None:
                # Improvement resets the failure streak; the epsilon guards
                # against counting noise-level changes as improvements.
                if current_value is None or current_value > res + _THRESHOLD_EPS:
                    failed_jobs = 0
                else:
                    failed_jobs += 1
                result = Result(time=res)
                api.report_result(desired_result, result)
            else:
                remaining_jobs.append((job_id, desired_result))
        jobs = remaining_jobs
        # Top the in-flight set back up to the parallelism limit.
        while len(jobs) < self.jobs_limit:
            desired_result = api.get_next_desired_result()
            if desired_result is None:
                break
            job_id = self._start_evaluation_job(job, desired_result.configuration.data)
            if job_id is None:
                # Launch failed: report an infinite cost so the tuner steers
                # away from this configuration.
                api.report_result(desired_result, Result(time=math.inf))
            else:
                jobs.append((job_id, desired_result))
        if not jobs:
            # Nothing running and nothing schedulable: stop searching.
            break
        r = api.get_best_result()
        if r is not None:
            current_value = r.time
            logging.debug('Opentuner current state: %s %s', r.time,
                          api.get_best_configuration())
        time.sleep(5)  # poll interval for evaluation results
    # NOTE(review): get_best_result() may return None if no evaluation ever
    # completed, which would raise AttributeError here — confirm upstream
    # guarantees at least one reported result.
    res = api.get_best_result().time
    vars = api.get_best_configuration()  # NOTE: shadows builtin vars(); kept as-is
    api.finish()
    return res, vars
def main():
    """Drive a short OpenTuner search over a single integer parameter.

    Runs at most 500 tuning trials of ``test_func`` on the integer
    parameter ``x`` in [-200, 200] and prints the best value found.
    """
    cli = argparse.ArgumentParser(parents=opentuner.argparsers())
    parsed_args = cli.parse_args()

    # Search space: one integer knob.
    search_space = ConfigurationManipulator()
    search_space.add_parameter(IntegerParameter('x', -200, 200))

    measurement = DefaultMeasurementInterface(args=parsed_args,
                                              manipulator=search_space,
                                              project_name='examples',
                                              program_name='api_test',
                                              program_version='0.1')
    tuner = TuningRunManager(measurement, parsed_args)

    trial = 0
    while trial < 500:
        request = tuner.get_next_desired_result()
        if request is None:
            # The search space for this example is very small, so sometimes
            # the techniques have trouble finding a config that hasn't already
            # been tested. Change this to a continue to make it try again.
            break
        tuner.report_result(request,
                            Result(time=test_func(request.configuration.data)))
        trial += 1

    winner = tuner.get_best_configuration()
    tuner.finish()
    print('best x found was', winner['x'])
def main():
    """Run the OpenTuner API example: tune integer ``x`` in [-200, 200].

    Modernized from Python 2 (``xrange``, ``print`` statement) to the
    Python 3 syntax used by the sibling variant of this example;
    behavior is unchanged.
    """
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    api = TuningRunManager(interface, args)
    # range works on both Python 2 and 3; xrange is Python-2-only.
    for x in range(500):
        desired_result = api.get_next_desired_result()
        if desired_result is None:
            # The search space for this example is very small, so sometimes
            # the techniques have trouble finding a config that hasn't already
            # been tested. Change this to a continue to make it try again.
            break
        cfg = desired_result.configuration.data
        result = Result(time=test_func(cfg))
        api.report_result(desired_result, result)
    best_cfg = api.get_best_configuration()
    api.finish()
    print('best x found was', best_cfg['x'])
def start(program):
    """Start tuning the named StreamJit application.

    Reads the application's stored configuration from streamjit.db and
    prepares the OpenTuner argument namespace.

    Args:
        program: StreamJit application name (also the DB lookup key).
    """
    log = logging.getLogger(__name__)
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    parser.add_argument('--program', help='Name of the StreamJit application')
    argv = ['--program', program, '--test-limit', '6000']
    args = parser.parse_args(argv)
    if not args.database:
        # One results database per tuned program.
        args.database = 'sqlite:///' + program + '.db'
    conn = None
    try:
        conn = sqlite3.connect('streamjit.db')
        c = conn.cursor()
        # Bug fix: bind the program name as a query parameter instead of
        # interpolating it into the SQL text (quote-safe, injection-safe).
        c.execute('SELECT configuration FROM apps WHERE name=?', (program,))
        row = c.fetchone()
        if not row:
            data = raw_input(
                "No entry found with name = %s \nPlease press anykey to exit" % program)
            sys.exit(1)
        cfgString = row[0]
        cfg = configuration.getConfiguration(cfgString)
        cfgparams = cfg.getAllParameters()
    except Exception:
        # Hold the console open so the user can see the traceback.
        print('Exception occured')
        traceback.print_exc()
        data = raw_input("Press Keyboard to exit...")
    finally:
        # Close the DB handle on every path (was previously leaked).
        if conn is not None:
            conn.close()
def get_argparsers(self):
    """Collect every command-line parser this tuner accepts.

    Returns the stock OpenTuner parsers plus a local parser that adds
    the ``--append`` and ``--output-dir`` options.
    """
    local = argparse.ArgumentParser(add_help = False)
    local.add_argument('--append', action = 'store_true',
                       help = 'append new tuning run to existing runs')
    local.add_argument('--output-dir', help = 'output directory')
    return opentuner.argparsers() + [local]
def run(self):
    """Run one full OpenTuner session on this thread, then exit."""
    log.info("tuning thread '%s' starting (%d total threads now).",
             self.name, threading.active_count())
    # Tuner options come from the [opentuner] section of the config file.
    cli = argparse.ArgumentParser(parents=opentuner.argparsers())
    raw = CONFIG.get("opentuner", "args").split()
    parsed = cli.parse_args(raw)
    measurement = CtreeMeasurementInterface(self._ctree_driver,
                                            *self._ot_args,
                                            **self._ot_kwargs)
    TuningRunMain(measurement, parsed).main()
    log.info("tuning thread '%s' terminating.", self.name)
def make_args(master="yarn", deploy_mode="cluster"):
    """Build the parsed OpenTuner argument namespace for a Spark run.

    Args:
        master: Spark master to target (default "yarn").
        deploy_mode: Spark deploy mode (default "cluster").

    Returns:
        The argparse namespace with tuner and Spark options applied.
    """
    parser = ArgumentParser(parents=argparsers())
    parser.add_argument("--master", type=SparkParam.MASTER.make_param_from_str)
    parser.add_argument("--deploy_mode",
                        type=SparkParam.DEPLOY_MODE.make_param_from_str)
    argv = ["--no-dups", "--test-limit", "1"]
    argv += ["--master", master]
    argv += ["--deploy_mode", deploy_mode]
    return parser.parse_args(argv)
def start(argv, cfg, ss):
    """Parse tuner arguments and hand off to main().

    Args:
        argv: command-line argument list for the tuner.
        cfg: configuration object forwarded to main().
        ss: search-space object forwarded to main().
    """
    log = logging.getLogger(__name__)
    cli = argparse.ArgumentParser(parents=opentuner.argparsers())
    cli.add_argument('--program', help='Name of the StreamJit application')
    parsed = cli.parse_args(argv)
    # Default the results database to a per-program SQLite file.
    if not parsed.database:
        parsed.database = 'sqlite:///' + parsed.program + '.db'
    main(parsed, cfg, ss)
def create_test_tuning_run(db):
    """Build a TuningRunManager wired to the given database URL.

    Args:
        db: database URL handed to OpenTuner (overrides the CLI default).

    Returns:
        A ready-to-use TuningRunManager over a single integer parameter
        'x' in [-200, 200].
    """
    cli = argparse.ArgumentParser(parents=opentuner.argparsers())
    ns = cli.parse_args()
    ns.database = db
    space = ConfigurationManipulator()
    space.add_parameter(IntegerParameter('x', -200, 200))
    iface = DefaultMeasurementInterface(args=ns,
                                        manipulator=space,
                                        project_name='examples',
                                        program_name='api_test',
                                        program_version='0.1')
    return TuningRunManager(iface, ns)
def __init__(self, *ot_args, **ot_kwargs):
    """Set up the driver and its tuning-run manager.

    Creates communication queues and spawns the tuning logic; extra
    positional/keyword arguments are forwarded to the measurement
    interface.
    """
    super(OpenTunerDriver, self).__init__()
    self._best_config = None
    # The interface holds a back-reference to this driver.
    measurement = CtreeMeasurementInterface(self, *ot_args, **ot_kwargs)
    # Tuner options come from the [opentuner] section of the config file.
    cli = argparse.ArgumentParser(parents=opentuner.argparsers())
    raw = CONFIG.get("opentuner", "args").split()
    parsed = cli.parse_args(raw)
    self.manager = TuningRunManager(measurement, parsed)
    self._converged = False
def __init__(self, train_queries, test_queries, experiment_config, result_dir, system_environment):
    """Set up an OpenTuner-based index tuner.

    Args:
        train_queries (list of SQLQuery): queries used during tuning
        test_queries (list of SQLQuery): queries used for evaluation
        experiment_config (dict): experiment settings (size/runtime caps
            and weights, reward penalty, tables, queries per episode, ...)
        result_dir (str): directory for result artifacts
        system_environment (PostgresSystemEnvironment): encapsulates
            the database environment
    """
    self.logger = logging.getLogger(__name__)
    self.logger.info('Setting up OpenTuner...')
    self.train_queries = train_queries
    self.test_queries = test_queries
    self.experiment_config = experiment_config
    # Reward-shaping knobs: caps, weights and a penalty term.
    self.max_size = experiment_config['max_size']
    self.size_weight = experiment_config['size_weight']
    self.max_runtime = experiment_config['max_runtime']
    self.runtime_weight = experiment_config['runtime_weight']
    self.reward_penalty = experiment_config['reward_penalty']
    self.system_environment = system_environment
    self.result_dir = result_dir

    # 2nd search space
    self.n_idxs = experiment_config['n_queries_per_episode']
    self.n_cols_per_idx = 3  # TODO hardcode
    self.tbls = experiment_config['tables']
    # Indexes are spread evenly across tables (integer division).
    self.n_idxs_per_tbl = int(self.n_idxs / len(self.tbls))

    # maps tbls to mapping of tbl's column indices to tbl's columns,
    # per search space representation
    self.tbl_2_col_idx_2_col = {}
    for tbl in self.tbls:
        self.tbl_2_col_idx_2_col[tbl] = {}
        for col_idx, col in enumerate(tpch_table_columns[tbl].keys()):
            self.tbl_2_col_idx_2_col[tbl][col_idx + 1] = col  # + 1 b/c 0 is noop

    # api
    sys.argv = [sys.argv[0]]  # opentuner expects own args
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    manipulator = self.build_search_space()
    interface = DefaultMeasurementInterface(args=args, manipulator=manipulator)
    self.api = TuningRunManager(interface, args)
import adddeps import datetime import argparse import opentuner import logging import os import re import subprocess import pandas as pd import common as cmn from opentuner.resultsdb.models import Result, TuningRun from opentuner.search import manipulator from numpy.f2py.diagnose import run_command argparser = argparse.ArgumentParser(parents=opentuner.argparsers(),add_help=False) argparser.add_argument('--source', help='source file to compile (only give name e.g: MatrixMultiply)') argparser.add_argument('--flags', default='bytecode,codecache,compilation,compiler,deoptimization,gc,interpreter,memory,priorities,temporary', help='define flag combinations to feed separated by commas (E.g: gc,compiler)') argparser.add_argument('--iterations', help='number of iterations to run a program to average runtime', default='3') argparser.add_argument('--configfile', help='where the configurations will be written') class JvmFlagsTunerInterface(opentuner.measurement.MeasurementInterface): __metaclass__ = abc.ABCMeta global count count = 0 def __init__(self, args, *pargs, **kwargs):
def tuning_loop():
    """Main LegUp tuning loop (wall-clock objective, single metric).

    Parses tuner arguments, builds the LegUp parameter search space, and
    runs OpenTuner until ``--stop-after`` seconds elapse, evaluating
    configurations on a thread pool either asynchronously (``--no-wait``)
    or in blocking batches.
    """
    report_delay = 200  # seconds between intermediate progress logs
    last_time = time.time()
    start_time = last_time
    parser = argparse.ArgumentParser(parents = opentuner.argparsers())
    parser.add_argument("--processes", type = int,
                        help = "Number of Python threads available.")
    parser.add_argument("--no-wait", action = "store_true",
                        help = "Do not wait for requested results to generate more requests.")
    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()
    # Translate the LegUp parameter catalog into OpenTuner parameters.
    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))
    interface = DefaultMeasurementInterface(args = args,
                                            manipulator = manipulator,
                                            project_name = 'HLS-FPGAs',
                                            program_name = 'legup-tuner',
                                            program_version = '0.0.1')
    manager = TuningRunManager(interface, args)
    current_time = time.time()
    computing_results = []  # [desired_result, AsyncResult] pairs in flight
    computed_results = []   # pairs finished during the current pass
    desired_results = manager.get_desired_results()
    # --stop-after is supplied by OpenTuner's stock argument parsers.
    while current_time - start_time < args.stop_after:
        if args.no_wait:
            # Asynchronous mode: launch everything, harvest whatever is ready.
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([desired_result,
                                              pool.apply_async(get_wallclock_time,
                                                               (desired_result.configuration.data, ))])
                for result in computing_results:
                    # NOTE(review): this membership test compares result[0]
                    # (a desired_result) against a list of [dr, async] pairs,
                    # so it is always true; harmless only because finished
                    # pairs are removed below and computed_results is cleared.
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time = cost))
                        computed_results.append(result)
                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)
                computed_results = []
        else:
            # Blocking mode: evaluate the whole batch before continuing.
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout = None)
                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time = result))
        desired_results = manager.get_desired_results()
        current_time = time.time()
        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time
    current_time = time.time()
    log_intermediate(current_time - start_time, manager)
    save_final_configuration(manager.get_best_configuration())
    manager.finish()
def tuning_loop():
    """LegUp CHStone tuning loop reporting full post-P&R metrics.

    Builds the LegUp search space, configures global paths for the
    Docker (legup_quartus) measurement environment, and runs OpenTuner
    until ``--stop-after`` seconds elapse. Blocking mode reports timing
    plus resource metrics (cycles, fmax, LUTs, pins, regs, block, ram,
    dsp).
    """
    report_delay = 30  # seconds between intermediate progress logs
    last_time = time.time()
    start_time = last_time
    iterations = 5  # NOTE(review): unused in this function
    parser = argparse.ArgumentParser(parents = opentuner.argparsers())
    parser.add_argument("--processes", type = int,
                        help = "Number of Python threads available.")
    parser.add_argument("--no-wait", action = "store_true",
                        help = "Do not wait for requested results to generate more requests.")
    parser.add_argument("--application", type = str, help = "Application name.")
    parser.add_argument("--verilog-file", type = str,
                        help = "Verilog file for the application.")
    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()
    # Measurement helpers read these module-level globals (paths inside
    # the container and on the host).
    global application
    global verilog_file
    global application_path
    global container_path
    global host_path
    global image_name
    global script_name
    global tuning_init
    application = args.application
    verilog_file = args.verilog_file
    application_path = "/root/legup_src/legup-4.0/examples/chstone/{0}".format(application)
    container_path = "/root/legup_src/legup-4.0/examples/chstone/{0}/tuner".format(application)
    host_path = "/home/bruelp/legup-tuner/post_place_and_route/py"
    image_name = "legup_quartus"
    script_name = "measure.sh"
    print(application, container_path, application_path)
    # Translate the LegUp parameter catalog into OpenTuner parameters.
    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))
    interface = DefaultMeasurementInterface(args = args,
                                            manipulator = manipulator,
                                            project_name = 'HLS-FPGAs',
                                            program_name = 'legup-tuner',
                                            program_version = '0.0.1')
    manager = TuningRunManager(interface, args)
    current_time = time.time()
    computing_results = []  # [desired_result, AsyncResult] pairs in flight
    computed_results = []   # pairs finished during the current pass
    desired_results = manager.get_desired_results()
    # --stop-after is supplied by OpenTuner's stock argument parsers.
    while current_time - start_time < args.stop_after:
        if args.no_wait:
            # Asynchronous mode: launch everything, harvest whatever is ready.
            # NOTE(review): this branch reports the raw return value as
            # Result(time=cost) while the blocking branch below indexes a
            # metric dict — confirm get_wallclock_time's return type.
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([desired_result,
                                              pool.apply_async(get_wallclock_time,
                                                               (desired_result.configuration.data, ))])
                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time = cost))
                        computed_results.append(result)
                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)
                computed_results = []
        else:
            # Blocking mode: evaluate the whole batch, then report the full
            # metric dictionary returned by get_wallclock_time.
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout = None)
                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time = result['value'],
                                                     cycles = result['cycles'],
                                                     fmax = result['fmax'],
                                                     LU = result['lu'],
                                                     pins = result['pins'],
                                                     regs = result['regs'],
                                                     block = result['block'],
                                                     ram = result['ram'],
                                                     dsp = result['dsp']))
        desired_results = manager.get_desired_results()
        current_time = time.time()
        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time
    current_time = time.time()
    log_intermediate(current_time - start_time, manager)
    save_final_configuration(manager.get_best_configuration())
    manager.finish()
import adddeps import argparse import logging import opentuner from opentuner.measurement import MeasurementInterface from opentuner.search.manipulator import ConfigurationManipulator from opentuner.search.manipulator import FloatParameter import measurement_client from measurement_client.client import MeasurementClient from measurement_client.gce_interface.interface import GCEInterface log = logging.getLogger(__name__) parsers = opentuner.argparsers() + measurement_client.argparsers() parser = argparse.ArgumentParser(parents = parsers) parser.add_argument('--dimensions', type=int, default=2, help='dimensions for the Rosenbrock function') parser.add_argument('--domain', type=float, default=1000, help='bound for variables in each dimension') parser.add_argument('--function', default='rosenbrock', choices=('rosenbrock', 'sphere', 'beale'), help='function to use') class Rosenbrock(MeasurementInterface): def run(self, desired_result, input, limit): cfg = desired_result.configuration.data val = 0.0 x0 = cfg[0]
import datetime import argparse import opentuner import logging import os import re import subprocess import pandas as pd import common as cmn import pickle from opentuner.resultsdb.models import Result, TuningRun from opentuner.search import manipulator from numpy.f2py.diagnose import run_command argparser = argparse.ArgumentParser(parents=opentuner.argparsers(),add_help=False) argparser.add_argument('--source', help='source file to compile (only give name e.g: MatrixMultiply)') argparser.add_argument('--flags', default='bytecode,codecache,compilation,compiler,deoptimization,gc,interpreter,memory,priorities,temporary', help='define flag combinations to feed separated by commas (E.g: gc,compiler)') argparser.add_argument('--iterations', help='number of iterations to run a program to average runtime', default='5') argparser.add_argument('--tune', help='What servers are to be tuned', default='tomcat,apache,mysql,tomcat_jvm') class JvmFlagsTunerInterface(opentuner.measurement.MeasurementInterface): __metaclass__ = abc.ABCMeta
def main(self):
    """Main entry point of the standalone HLS tuner.

    Parses the tuner command line, prepares the output directory and
    logging, derives the OpenTuner argument namespace, selects the
    objective (area-thresholded when any resource cap is given), and
    launches the Vivado HLS measurement interface.
    """
    argparser = argparse.ArgumentParser(description='Standalone HLS tuner')
    argparser.add_argument('src_dir', help='Directory with sources', type=os.path.abspath)
    argparser.add_argument('pragma_file', help='Pragma specification file', type=os.path.abspath)
    argparser.add_argument('output_dir', help='Output directory', type=os.path.abspath)
    argparser.add_argument('--overwrite', action='store_true',
                           help='Overwrite existing tuning runs')
    argparser.add_argument('--append', action='store_true',
                           help='Append new tuning run to existing runs')
    argparser.add_argument('--technique', action='store', nargs='*',
                           help="Search technique", default=['AUCBanditMetaTechniqueA'])
    argparser.add_argument('--parallelism', type=int, default=4,
                           help='Number of builds running in parallel')
    argparser.add_argument('--use_prebuilt', action='store_true',
                           help='Use prebuilt kernel (for debugging)')
    argparser.add_argument('--max_luts', type=int, help='LUT constraint')
    argparser.add_argument('--max_regs', type=int, help='Register constraint')
    argparser.add_argument('--max_dsps', type=int, help='DSP constraint')
    argparser.add_argument('--max_brams', type=int, help='BRAM constraint')
    argparser.add_argument('--iters', type=int, help='Number of test iterations')
    argparser.add_argument(
        '--no_cleanup', action='store_true',
        help='Do not remove output of successful builds')
    args = argparser.parse_args()
    self.prepare_output_dir(args)
    self.init_logging(args.output_dir)
    # Overlay OpenTuner's defaults onto the already-parsed namespace:
    # parsing the empty argument sequence '' consumes no tokens and fills
    # in any missing attributes on `args`, returning that same namespace.
    new_argparser = argparse.ArgumentParser(parents=opentuner.argparsers())
    new_args = new_argparser.parse_args('', args)
    new_args.database = args.output_dir + '/HLS_tuner.db'
    if args.iters is not None:
        # --iters caps the number of OpenTuner tests.
        new_args.test_limit = args.iters
    log.info('Search algorithm: ' + str(new_args.technique))
    # Any resource cap switches the objective to the thresholded variant.
    if args.max_luts or args.max_regs or args.max_dsps or args.max_brams:
        objective = ThresholdAreaMinimizeTime(args.max_luts, args.max_regs,
                                              args.max_dsps, args.max_brams)
    else:
        objective = MinimizeTime()
    input_manager = FixedInputManager()
    VivadoHLSInterface.main(new_args, tuner_root=tuner_root,
                            objective=objective, input_manager=input_manager)
from opentuner.search import simplextechniques from opentuner.search import patternsearch from opentuner.search import bandittechniques from opentuner.search import technique COMPILE_CMD = ( '{args.cxx} "{cpp}" -o "{bin}" -I "{args.halide_dir}/include" ' '"{args.halide_dir}/bin/libHalide.a" -ldl -lpthread {args.cxxflags} ' '-DAUTOTUNE_N="{args.input_size}" -DAUTOTUNE_TRIALS={args.trials} ' '-DAUTOTUNE_LIMIT={limit}') log = logging.getLogger('halide') parser = argparse.ArgumentParser(parents=opentuner.argparsers()) parser.add_argument('source', help='Halide source file annotated with ' 'AUTOTUNE_HOOK') parser.add_argument('--halide-dir', default=os.path.expanduser('~/Halide'), help='Installation directory for Halide') parser.add_argument('--input-size', help='Input size to test with') parser.add_argument('--trials', default=3, type=int, help='Number of times to test each schedule') parser.add_argument('--nesting', default=2, type=int, help='Maximum depth for generated loops') parser.add_argument('--max-split-factor', default=8, type=int, help='The largest value a single split() can add') parser.add_argument('--compile-command', default=COMPILE_CMD, help='How to compile generated C++ code') parser.add_argument('--cxx', default='clang++',
def make_parser():
    """Build the default command-line parser (OpenTuner parents, help on)."""
    parser = ArgumentParser(parents=argparsers(), add_help=True)
    return parser
def tuning_loop():
    """LegUp tuning loop (wall-clock objective, 5-second report cadence).

    Parses tuner arguments, builds the LegUp parameter search space, and
    runs OpenTuner until ``--stop-after`` seconds elapse, evaluating
    configurations on a thread pool either asynchronously (``--no-wait``)
    or in blocking batches.
    """
    report_delay = 5  # seconds between intermediate progress logs
    last_time = time.time()
    start_time = last_time
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    parser.add_argument("--processes", type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait", action="store_true",
        help="Do not wait for requested results to generate more requests.")
    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()
    # Translate the LegUp parameter catalog into OpenTuner parameters.
    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')
    manager = TuningRunManager(interface, args)
    current_time = time.time()
    computing_results = []  # [desired_result, AsyncResult] pairs in flight
    computed_results = []   # pairs finished during the current pass
    desired_results = manager.get_desired_results()
    # --stop-after is supplied by OpenTuner's stock argument parsers.
    while current_time - start_time < args.stop_after:
        if args.no_wait:
            # Asynchronous mode: launch everything, harvest whatever is ready.
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])
                for result in computing_results:
                    # NOTE(review): the membership test compares result[0]
                    # (a desired_result) against a list of [dr, async] pairs,
                    # so it is always true; harmless only because finished
                    # pairs are removed below and computed_results is cleared.
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)
                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)
                computed_results = []
        else:
            # Blocking mode: evaluate the whole batch before continuing.
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout=None)
                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time=result))
        desired_results = manager.get_desired_results()
        current_time = time.time()
        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time
    current_time = time.time()
    log_intermediate(current_time - start_time, manager)
    save_final_configuration(manager.get_best_configuration())
    manager.finish()
import logging import subprocess import os from uuid import uuid4 import opentuner from opentuner.search.manipulator import ConfigurationManipulator, PermutationParameter from opentuner.measurement import MeasurementInterface import measurement_client from measurement_client.client import MeasurementClient from measurement_client.gce_interface.interface import GCEInterface log = logging.getLogger(__name__) parsers = opentuner.argparsers() + measurement_client.argparsers() argparser = argparse.ArgumentParser(parents = parsers) argparser.add_argument( "-last", "--log-last", dest = "loglast", type = str, required = True, help = "File to save best configuration to.") argparser.add_argument( "-size", "--instance-size", dest = "size", type = int, default = 85900, help = "Instance size.") class TSP(MeasurementInterface):
import json import logging import opentuner import os import random import re import shutil import subprocess import sys from opentuner.resultsdb.models import Result, TuningRun from opentuner.search import manipulator log = logging.getLogger('gccflags') argparser = argparse.ArgumentParser(parents=opentuner.argparsers()) argparser.add_argument('source', help='source file to compile') argparser.add_argument( '--compile-template', default='g++ {source} -o {output} {flags}', help='command to compile {source} into {output} with {flags}') argparser.add_argument('--compile-limit', type=float, default=60, help='kill gcc if it runs more than {default} sec') argparser.add_argument('--scaler', type=int, default=4, help='by what factor to try increasing parameters') argparser.add_argument('--cc', default='gcc', help='g++ or gcc') argparser.add_argument('--output',
def tuning_loop():
    """LegUp CHStone tuning loop reporting full post-P&R metrics.

    Builds the LegUp search space, configures global paths for the
    Docker (legup_quartus) measurement environment, and runs OpenTuner
    until ``--stop-after`` seconds elapse. Blocking mode reports timing
    plus resource metrics (cycles, fmax, LUTs, pins, regs, block, ram,
    dsp).
    """
    report_delay = 30  # seconds between intermediate progress logs
    last_time = time.time()
    start_time = last_time
    iterations = 5  # NOTE(review): unused in this function
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    parser.add_argument("--processes", type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait", action="store_true",
        help="Do not wait for requested results to generate more requests.")
    parser.add_argument("--application", type=str, help="Application name.")
    parser.add_argument("--verilog-file", type=str,
                        help="Verilog file for the application.")
    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()
    # Measurement helpers read these module-level globals (paths inside
    # the container and on the host).
    global application
    global verilog_file
    global application_path
    global container_path
    global host_path
    global image_name
    global script_name
    global tuning_init
    application = args.application
    verilog_file = args.verilog_file
    application_path = "/root/legup_src/legup-4.0/examples/chstone/{0}".format(
        application)
    container_path = "/root/legup_src/legup-4.0/examples/chstone/{0}/tuner".format(
        application)
    host_path = "/home/bruelp/legup-tuner/post_place_and_route/py"
    image_name = "legup_quartus"
    script_name = "measure.sh"
    print(application, container_path, application_path)
    # Translate the LegUp parameter catalog into OpenTuner parameters.
    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')
    manager = TuningRunManager(interface, args)
    current_time = time.time()
    computing_results = []  # [desired_result, AsyncResult] pairs in flight
    computed_results = []   # pairs finished during the current pass
    desired_results = manager.get_desired_results()
    # --stop-after is supplied by OpenTuner's stock argument parsers.
    while current_time - start_time < args.stop_after:
        if args.no_wait:
            # Asynchronous mode: launch everything, harvest whatever is ready.
            # NOTE(review): this branch reports the raw return value as
            # Result(time=cost) while the blocking branch below indexes a
            # metric dict — confirm get_wallclock_time's return type.
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])
                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)
                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)
                computed_results = []
        else:
            # Blocking mode: evaluate the whole batch, then report the full
            # metric dictionary returned by get_wallclock_time.
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time, cfgs).get(timeout=None)
                for dr, result in zip(desired_results, results):
                    manager.report_result(
                        dr,
                        Result(time=result['value'],
                               cycles=result['cycles'],
                               fmax=result['fmax'],
                               LU=result['lu'],
                               pins=result['pins'],
                               regs=result['regs'],
                               block=result['block'],
                               ram=result['ram'],
                               dsp=result['dsp']))
        desired_results = manager.get_desired_results()
        current_time = time.time()
        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time
    current_time = time.time()
    log_intermediate(current_time - start_time, manager)
    save_final_configuration(manager.get_best_configuration())
    manager.finish()