Code Example #1
 def solve(self, job):
     logging.debug('Starting opentuner')
     failed_jobs_threshold = self.jobs_limit * _FAILED_JOBS_COEF
     manipulator = ConfigurationManipulator()
     for var in job.optimization_job.task_variables:
         if var.HasField('continuous_variable'):
             cont_var = var.continuous_variable
             param = FloatParameter(var.name, cont_var.l, cont_var.r)
         else:
             int_var = var.integer_variable
             param = IntegerParameter(var.name, int_var.l, int_var.r)
         manipulator.add_parameter(param)
     parser = argparse.ArgumentParser(parents=opentuner.argparsers())
     args = parser.parse_args([])
     args.parallelism = 4
     args.no_dups = True
     interface = DefaultMeasurementInterface(args=args,
                                             manipulator=manipulator,
                                             project_name=job.job_id)
     api = TuningRunManager(interface, args)
     jobs = []
     current_value = None
     failed_jobs = 0
     while failed_jobs < failed_jobs_threshold and not self._check_for_termination(
             job):
         remaining_jobs = []
         for job_id, desired_result in jobs:
             res = self._get_evaluation_job_result(job_id)
             if res is not None:
                 if current_value is None or current_value > res + _THRESHOLD_EPS:
                     failed_jobs = 0
                 else:
                     failed_jobs += 1
                 result = Result(time=res)
                 api.report_result(desired_result, result)
             else:
                 remaining_jobs.append((job_id, desired_result))
         jobs = remaining_jobs
         while len(jobs) < self.jobs_limit:
             desired_result = api.get_next_desired_result()
             if desired_result is None:
                 break
             job_id = self._start_evaluation_job(
                 job, desired_result.configuration.data)
             if job_id is None:
                 api.report_result(desired_result, Result(time=math.inf))
             else:
                 jobs.append((job_id, desired_result))
         if not jobs:
             break
         r = api.get_best_result()
         if r is not None:
             current_value = r.time
             logging.debug('Opentuner current state: %s %s', r.time,
                           api.get_best_configuration())
         time.sleep(5)
     res = api.get_best_result().time
     vars = api.get_best_configuration()
     api.finish()
     return res, vars
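
This example refers to module-level imports and constants that are not shown. A minimal sketch of what they might look like; the values of _FAILED_JOBS_COEF and _THRESHOLD_EPS are illustrative assumptions, not the project's actual settings:

# Assumed imports and constants for Code Example #1 (values are illustrative).
import argparse
import logging
import math
import time

import opentuner
from opentuner.api import TuningRunManager
from opentuner.measurement.interface import DefaultMeasurementInterface
from opentuner.resultsdb.models import Result
from opentuner.search.manipulator import (ConfigurationManipulator,
                                          FloatParameter, IntegerParameter)

_FAILED_JOBS_COEF = 10   # assumed: tolerate jobs_limit * 10 non-improving jobs
_THRESHOLD_EPS = 1e-6    # assumed: minimum improvement that counts as progress
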
Code Example #2
File: modeltuner.py  Project: HansGiesen/hls_tuner
    def tune(self):
        """Optimize the objective function.
    
    Returns
    -------
    dict
      Best configuration found that has not been evaluated yet
    """

        old_results = {
            result.configuration.hash: result
            for result in self._data_set
        }

        best_result = None
        best_new_result = None
        for _ in range(ITERATIONS):
            desired_result = self._technique.desired_result()
            if desired_result is None:
                break

            cfg = desired_result.configuration
            if self._driver.has_results(cfg):
                continue

            result = Result()
            result.configuration = cfg

            old_result = old_results.get(cfg.hash)
            if old_result:
                # Avoid making predictions for evaluated points and getting inaccurate results due to noise.
                for model in self._models:
                    setattr(result, model.metric,
                            getattr(old_result, model.metric))
                    setattr(result, model.metric + "_std_dev", 0.0)
            else:
                for model in self._models:
                    mean, std_dev = model.predict(cfg.data)
                    setattr(result, model.metric, mean)
                    setattr(result, model.metric + "_std_dev", std_dev)

            self._driver.add_result(result)

            self._driver.invoke_result_callback(result)

            # Even though we don't return the best result if we have already evaluated it, we still need it because some
            # search algorithms rely on it.
            if best_result is None or self._objective.lt(result, best_result):
                best_result = result
                self._driver.set_best_result(best_result)

            if not old_result and (best_new_result is None
                                   or self._objective.lt(
                                       result, best_new_result)):
                best_new_result = result

        # Return the configuration associated with the best result.  We do not return a Configuration object because there
        # may already be another Configuration object with the same parameters in the database.  Returning a new object
        # with the same parameters may mess up code that expects the configuration to be unique.
        return best_new_result.configuration.data if best_new_result is not None else None
Code Example #3
 def report(self, **kwargs):
     """Report the performance of the most recent configuration."""
     if not self._converged:
         print("Tuning run result:",
               self.curr_desired_result.configuration.data, kwargs)
         self.manager.report_result(self.curr_desired_result,
                                    Result(**kwargs))
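
A hedged usage sketch: assuming `tuner` is an instance of the class above and the objective is execution time, the caller reports a measurement like this (the names tuner and elapsed are illustrative):

elapsed = 1.23              # measured runtime of the latest configuration, in seconds
tuner.report(time=elapsed)  # forwarded to the manager as Result(time=1.23)
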
Code Example #4
    def lookup(self, cfg):
        """
    Look up a configuration in the database and return the associated result if it exists, and None otherwise.
    """

        # Generate a hash for the configuration.
        hash = self._get_hash(cfg)

        # Query the database for the configuration.
        try:
            result = self.Session.query(CachedResult).filter_by(
                cfg_hash=hash).one()
        except NoResultFound:
            return None
        finally:
            # Closing the connection appears necessary to avoid warnings that database objects are created in one thread and
            # used in another.
            self.Session.remove()

        # Copy the relevant fields from the CachedResult object to a new Result object.
        new_result = Result()
        for column in CachedResult.__table__.columns.keys():
            if column not in ["id", "cfg", "cfg_hash"]:
                setattr(new_result, column, getattr(result, column))

        # Return the Result object.
        return new_result
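
The lookup code above assumes a SQLAlchemy model and scoped session defined elsewhere. A minimal sketch of that scaffolding; column names other than id, cfg, and cfg_hash are assumptions for illustration:

# Assumed SQLAlchemy scaffolding for lookup() (illustrative).
from sqlalchemy import Column, Float, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, scoped_session, sessionmaker

Base = declarative_base()

class CachedResult(Base):
    __tablename__ = 'cached_results'
    id = Column(Integer, primary_key=True)
    cfg = Column(String)        # serialized configuration
    cfg_hash = Column(String)   # hash used as the lookup key
    run_time = Column(Float)    # example metric column copied into Result

engine = create_engine('sqlite:///cache.db')
Session = scoped_session(sessionmaker(bind=engine))
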
Code Example #5
def main():
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args()
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    api = TuningRunManager(interface, args)
    for x in range(500):
        desired_result = api.get_next_desired_result()
        if desired_result is None:
            # The search space for this example is very small, so sometimes
            # the techniques have trouble finding a config that hasn't already
            # been tested.  Change this to a continue to make it try again.
            break
        cfg = desired_result.configuration.data
        result = Result(time=test_func(cfg))
        api.report_result(desired_result, result)

    best_cfg = api.get_best_configuration()
    api.finish()
    print('best x found was', best_cfg['x'])
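
test_func is defined elsewhere in this example. A stand-in consistent with the IntegerParameter('x', -200, 200) search space might look like this (illustrative, not the original definition):

def test_func(cfg):
    x = cfg['x']
    return (x - 10) ** 2  # minimized at x = 10; any cheap function of cfg works
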
Code Example #6
    def observe(self, X, y):
        """Feed the observations back to opentuner.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated.
        """
        assert len(X) == len(y)

        for x_guess, y_ in zip(X, y):
            x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)

            # If we can't find the dr object then it must be the dummy guess.
            if x_guess_ not in self.x_to_dr:
                assert x_guess == self.dummy_suggest, "Appears to be guess that did not originate from suggest"
                continue

            # Get the corresponding DesiredResult object.
            dr = self.x_to_dr.pop(x_guess_, None)
            # This will also catch None from opentuner.
            assert isinstance(
                dr,
                DesiredResult), "DesiredResult object not available in x_to_dr"

            # Opentuner's arg names assume we are minimizing execution time.
            # So, if we want to minimize we have to pretend y is a 'time'.
            result = Result(time=y_)
            self.api.report_result(dr, result)
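
hashable_dict is a helper defined elsewhere on OpentunerOptimizer. A sketch of a typical implementation (an assumption, shown for context): it turns the suggestion dict into something usable as a dictionary key.

@staticmethod
def hashable_dict(d):
    # Sort the items so logically equal dicts map to the same key.
    return tuple(sorted(d.items()))
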
Code Example #7
 def run(self, desired_result, input, limit):
     cfg = desired_result.configuration.data
     print "Running..."
     _, correct, incorrect, size = score_multiple(cfg)
     global best
     if is_better(best, (correct, incorrect)):
         best = (correct, incorrect)
         print(best, cfg)
     return Result(time=0.0,
                   accuracy=(float(correct)/size),
                   size=incorrect)
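
This run() relies on a module-level `best` and two helpers defined elsewhere. Hedged stand-ins consistent with how they are called above (assumptions, not the project's definitions):

best = (0, float('inf'))  # (correct, incorrect) of the best configuration so far

def is_better(current_best, candidate):
    # More correct answers win; ties are broken by fewer incorrect answers.
    return (candidate[0], -candidate[1]) > (current_best[0], -current_best[1])

def score_multiple(cfg):
    # Placeholder: evaluate cfg and return (raw_output, correct, incorrect, total).
    return None, 0, 0, 1
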
Code Example #8
File: gccflags.py  Project: iceqp/JATTHotspotTuner
    def run_precompiled(self, desired_result, input, limit, compile_result,
                        result_id):
        # Make sure compile was successful
        if compile_result == self.compile_results['timeout']:
            return Result(state='TIMEOUT', time=float('inf'))
        elif compile_result == self.compile_results['error']:
            return Result(state='ERROR', time=float('inf'))

        tmp_dir = self.get_tmpdir(result_id)
        output_dir = '%s/%s' % (tmp_dir, args.output)
        try:
            run_result = self.call_program([output_dir],
                                           limit=limit,
                                           memory_limit=args.memory_limit)
        except OSError:
            return Result(state='ERROR', time=float('inf'))

        if run_result['returncode'] != 0:
            if run_result['timeout']:
                return Result(state='TIMEOUT', time=float('inf'))
            else:
                log.error('program error')
                return Result(state='ERROR', time=float('inf'))

        return Result(time=run_result['time'])
Code Example #9
File: gccflags.py  Project: jackssirs/auto_wps
    def run_precompiled(self, desired_result, input, limit, compile_result,
                        result_id):
        if self.args.force_killall:
            os.system('killall -9 cc1plus 2>/dev/null')
        #Make sure compile was successful
        if compile_result == self.compile_results['timeout']:
            return Result(state='TIMEOUT', time=float('inf'))
        elif compile_result == self.compile_results['error']:
            return Result(state='ERROR', time=float('inf'))

        #lvf: at this point libc has already been built ('make' succeeded), so libc.so and libc.so.6 are available

        tmp_dir = self.get_tmpdir(result_id)
        output_dir = '%s/%s' % (tmp_dir, args.output)
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            s.connect((mlphost, mlpport))
            #lvf: copy the built binaries (libc.so, libc.so.6) to the WPS test machine
            print("scp")
            os.system("scp " + LIBC_Build + "/libc.so " + WPS_LIB)
            os.system("scp " + LIBC_Build + "/libc.so.6 " + WPS_LIB)
            #lvf: tell the MLP model on the monitor machine that the test is ready
            print("send message: hasscp")
            s.send(b"hasscp")  # sockets carry bytes, not str
            print("waiting for mlp reply...")
            data = s.recv(1024).decode()
            print("mlp said wps needs seconds:  %s" % data)
            #wps may have crashed
            if data == 'wps_crash':
                print("wps crashed")
                log.error('program error')
                return Result(state='ERROR', time=float('inf'))

            data_float = float(data)
            # initialize run_result by running the compiled binary; its time is
            # overwritten below with the measurement from the monitor machine
            run_result = self.call_program([output_dir],
                                           limit=limit,
                                           memory_limit=args.memory_limit)
            run_result['time'] = data_float

            #lvf: wait for the monitor machine's signal (test finished, with return code and time in seconds)
        except OSError:
            return Result(state='ERROR', time=float('inf'))

        #lvf: make sure the return code is fine
        if run_result['returncode'] != 0:
            if run_result['timeout']:
                return Result(state='TIMEOUT', time=float('inf'))
            else:
                log.error('program error')
                return Result(state='ERROR', time=float('inf'))

        #lvf: return the time reported by the monitor machine
        return Result(time=run_result['time'])
Code Example #10
def main():
    apis = [
        create_test_tuning_run('sqlite:////tmp/a.db'),
        create_test_tuning_run('sqlite:////tmp/b.db'),
        create_test_tuning_run('sqlite:////tmp/c.db')
    ]
    test_funcs = [test_func1, test_func2, test_func3]
    for x in range(100):
        for api, test_func in zip(apis, test_funcs):
            desired_result = api.get_next_desired_result()
            if desired_result is None:
                continue
            cfg = desired_result.configuration.data
            result = Result(time=test_func(cfg))
            api.report_result(desired_result, result)

    best_cfgs = [api.get_best_configuration() for api in apis]
    for api in apis:
        api.finish()

    print('best x configs: {}'.format(best_cfgs))
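
create_test_tuning_run and the three objective functions are defined elsewhere in this example. A hedged sketch of the factory, modeled on the single-run example earlier on this page (the body is an assumption):

def create_test_tuning_run(db):
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())
    args = parser.parse_args([])
    args.database = db  # each tuning run keeps its own results database
    manipulator = ConfigurationManipulator()
    manipulator.add_parameter(IntegerParameter('x', -200, 200))
    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='examples',
                                            program_name='api_test',
                                            program_version='0.1')
    return TuningRunManager(interface, args)
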
Code Example #11
    def run_precompiled(self, desired_result, inp, limit, compile_result,
                        result_id):
        "Get metrics of a built HLS kernel."

        try:
            output_dir = self.args.output_dir + "/{0:04d}".format(result_id)

            with open(output_dir + '/stdout.log', 'r') as input_file:
                report = []
                cosim_tb_output = []
                section = 'report'
                for line in input_file:
                    if line.startswith(
                        ('INFO: [COSIM 212-333]', 'INFO: [COSIM 212-1000]')):
                        section = 'report'
                    if section == 'report':
                        report.append(line)
                    elif section == 'C TB output':
                        cosim_tb_output.append(line)
                    if line.startswith('INFO: [COSIM 212-302]'):
                        section = 'C TB output'
                    elif line.startswith('INFO: [COSIM 212-316]'):
                        section = 'Cosim TB output'
            report = ''.join(report)
            cosim_tb_output = ''.join(cosim_tb_output)

            match = re.search(r'^ERROR: \[(.*)\] (.*)\n', report, re.MULTILINE)
            if match:
                return self.report_error(result_id, match.group(1),
                                         match.group(2))

            match = re.search(r'^ERROR: (.*)\n', report, re.MULTILINE)
            if match:
                return self.report_error(result_id, "ERROR", match.group(1))

            if not re.search(r'Exiting vivado_hls', report):
                return self.report_error(result_id, 'TIMEOUT',
                                         'Build timed out.')

            cosim_log = glob.glob(
                output_dir +
                '/proj_cholesky/solution1/sim/report/cholesky_top_cosim.rpt'
            )[0]
            with open(cosim_log, 'r') as input_file:
                for line in input_file:
                    if 'Verilog' in line:
                        tokens = line.split('|')
                        latency = float(tokens[5].strip())

            tree = ElementTree.parse(
                output_dir + '/proj_cholesky/solution1/syn/report/csynth.xml')
            luts = int(tree.find('AreaEstimates/Resources/LUT').text)
            regs = int(tree.find('AreaEstimates/Resources/FF').text)
            brams = int(tree.find('AreaEstimates/Resources/BRAM_18K').text)
            dsps = int(tree.find('AreaEstimates/Resources/DSP48E').text)

            if getattr(desired_result.configuration, "extract_struct", False):
                kernel_struc = KernelStructure(output_dir)
                kernel_struc.match_pragmas(self.pragmas)
                kernel_struc.store(output_dir + "/krnl_struc.yml")

            if not self.args.no_cleanup:
                shutil.rmtree(output_dir + '/proj_cholesky')

            os.system("gzip " + output_dir + "/stdout.log " + output_dir +
                      "/vivado_hls.log")

            if self.args.use_prebuilt:
                latency = compile_result['run_time']

            log.info('Latency of configuration %d: %f.', result_id, latency)
            return Result(state='OK',
                          run_time=latency,
                          luts=luts,
                          regs=regs,
                          brams=brams,
                          dsps=dsps,
                          msg='Build was successful.')

        except Exception:
            log.error(traceback.format_exc())
            return Result(state='EXCEPTION', msg='Unknown exception')
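
report_error is a helper on this tuner class. A hedged sketch consistent with its call sites above (an assumption, not the project's definition):

def report_error(self, result_id, state, msg):
    # Log the failure and mark the configuration as unusable.
    log.error('Configuration %s failed: %s: %s', result_id, state, msg)
    return Result(state=state, run_time=float('inf'), msg=msg)
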
Code Example #12
def tuning_loop():
    report_delay = 5
    last_time = time.time()
    start_time = last_time
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())

    parser.add_argument("--processes",
                        type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait",
        action="store_true",
        help="Do not wait for requested results to generate more requests.")

    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: No such parameter type \"{0}\"".format(name))

    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')

    manager = TuningRunManager(interface, args)

    current_time = time.time()
    computing_results = []
    computed_results = []
    desired_results = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time,
                                         cfgs).get(timeout=None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(dr, Result(time=result))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()
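
The loop above calls three helpers defined elsewhere in the project. Hedged stand-ins consistent with how they are called (placeholders for context, not the real implementations):

import random

def get_wallclock_time(cfg):
    # Placeholder objective: build/run the design described by cfg and
    # return its wall-clock time in seconds.
    return random.random()

def log_intermediate(elapsed, manager):
    # Placeholder: record the best configuration seen after `elapsed` seconds.
    print(elapsed, manager.get_best_configuration())

def save_final_configuration(cfg):
    # Placeholder: persist the winning configuration.
    print('best configuration:', cfg)
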
Code Example #13
    def run(self, desired_result, input, limit):
        cfg = desired_result.configuration.data
        self.flags = ''
        print(cfg)
        if 'tomcat_jvm' in self.tune:
            # assign garbage collector flags
            if (self.gc):
                for flag in self.gc_select_flags:
                    if cfg[flag] == 'on':
                        self.flags += ' -XX:+{0}'.format(flag)
                        if flag == 'UseParallelGC':
                            for gc_flag in self.parallel_common_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.parallel_common_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.parallel_common_param)

                            for gc_flag in self.parallel_young_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.parallel_young_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.parallel_young_param)

                            # with parallel gc it is possible to use or not to use parallel old
                            for gc_flag in self.gc_select_flags:
                                if gc_flag == 'UseParallelOldGC':
                                    if cfg[gc_flag] == 'on':
                                        self.flags += ' -XX:+{0}'.format(
                                            gc_flag)
                                        for parallel_old_flag in self.parallel_old_bool:
                                            self.add_to_flags_bool(
                                                cfg, parallel_old_flag)
                                        for parallel_old_flag in self.parallel_old_param:
                                            self.add_to_flags_param(
                                                cfg, parallel_old_flag,
                                                self.parallel_old_param)
                                    elif cfg[gc_flag] == 'off':
                                        self.flags += ' -XX:-{0}'.format(
                                            gc_flag)
                            break

                        elif flag == 'UseParallelOldGC':
                            for gc_flag in self.parallel_common_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.parallel_common_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.parallel_common_param)

                            for gc_flag in self.parallel_young_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.parallel_young_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.parallel_young_param)

                            for parallel_old_flag in self.parallel_old_bool:
                                self.add_to_flags_bool(cfg, parallel_old_flag)
                            for parallel_old_flag in self.parallel_old_param:
                                self.add_to_flags_param(
                                    cfg, parallel_old_flag,
                                    self.parallel_old_param)
                            break

                        # if g1 gc only use g1 related flags + common
                        elif flag == 'UseG1GC':
                            for gc_flag in self.g1_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.g1_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.g1_param)
                            break

                        elif flag == 'UseConcMarkSweepGC':
                            for gc_flag in self.cms_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.cms_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.cms_param)

                            for gc_flag in self.gc_select_flags:
                                if gc_flag == 'UseParNewGC':
                                    if cfg[gc_flag] == 'on':
                                        self.flags += ' -XX:+{0}'.format(
                                            gc_flag)
                                        for parnew_flag in self.parnew_bool:
                                            self.add_to_flags_bool(
                                                cfg, parnew_flag)
                                        for parnew_flag in self.parnew_param:
                                            self.add_to_flags_param(
                                                cfg, parnew_flag,
                                                self.parnew_param)
                                    elif cfg[gc_flag] == 'off':
                                        self.flags += ' -XX:-{0}'.format(
                                            gc_flag)
                            break

                        elif flag == 'UseParNewGC':
                            for gc_flag in self.parnew_bool:
                                self.add_to_flags_bool(cfg, gc_flag)
                            for gc_flag in self.parnew_param:
                                self.add_to_flags_param(
                                    cfg, gc_flag, self.parnew_param)

                            for gc_flag in self.gc_select_flags:
                                if gc_flag == 'UseConcMarkSweepGC':
                                    if cfg[gc_flag] == 'on':
                                        self.flags += ' -XX:+{0}'.format(
                                            gc_flag)
                                        for cms_flag in self.cms_bool:
                                            self.add_to_flags_bool(
                                                cfg, cms_flag)
                                        for cms_flag in self.cms_param:
                                            self.add_to_flags_param(
                                                cfg, cms_flag, self.cms_param)
                                    elif cfg[gc_flag] == 'off':
                                        self.flags += ' -XX:-{0}'.format(
                                            gc_flag)
                            break

                        # if serial gc only the common gc flags will be used
                        elif flag == 'UseSerialGC':
                            break

                #add common gc flags
                for gc_flag in self.gc_common_bool:
                    self.add_to_flags_bool(cfg, gc_flag)
                for gc_flag in self.gc_common_param:
                    self.add_to_flags_param(cfg, gc_flag, self.gc_common_param)

            # assign compilation flags
            if (self.compilation):
                for flag in self.compilation_bool:
                    if cfg[flag] == 'on':
                        self.flags += ' -XX:+{0}'.format(flag)
                        # if tieredCompilation is on use tiered compilation + c1 compiler flags
                        if flag == 'TieredCompilation':
                            # use tiered compilation flags
                            for tiered_flag in self.tiered_compilation_bool:
                                self.add_to_flags_bool(cfg, tiered_flag)
                            for tiered_flag in self.tiered_compilation_param:
                                self.add_to_flags_param(
                                    cfg, tiered_flag,
                                    self.tiered_compilation_param)

                            # if compiler flags are used use client compiler flags
                            if (self.compiler):
                                for c1_flag in self.client_bool:
                                    self.add_to_flags_bool(cfg, c1_flag)

                                for c1_flag in self.client_param:
                                    self.add_to_flags_param(
                                        cfg, c1_flag, self.client_param)

                    elif cfg[flag] == 'off':
                        self.flags += ' -XX:-{0}'.format(flag)

                for flag in self.compilation_param:
                    self.add_to_flags_param(cfg, flag, self.compilation_param)

            # assign Compiler flags
            if (self.compiler):
                for flag in self.compiler_common_bool:
                    self.add_to_flags_bool(cfg, flag)

                for flag in self.compiler_common_param:
                    self.add_to_flags_param(cfg, flag,
                                            self.compiler_common_param)

                for flag in self.server_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.server_param:
                    self.add_to_flags_param(cfg, flag, self.server_param)

            if (self.bytecode):
                for flag in self.bytecode_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.bytecode_param:
                    self.add_to_flags_param(cfg, flag, self.bytecode_param)

            if (self.codecache):
                for flag in self.codecache_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.codecache_param:
                    self.add_to_flags_param(cfg, flag, self.codecache_param)

            if (self.deoptimization):
                for flag in self.deoptimization_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.deoptimization_param:
                    self.add_to_flags_param(cfg, flag,
                                            self.deoptimization_param)

            if (self.interpreter):
                for flag in self.interpreter_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.interpreter_param:
                    self.add_to_flags_param(cfg, flag, self.interpreter_param)

            if (self.memory):
                for flag in self.memory_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.memory_param:
                    self.add_to_flags_param(cfg, flag, self.memory_param)

            if (self.priorities):
                for flag in self.priorities_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.priorities_param:
                    self.add_to_flags_param(cfg, flag, self.priorities_param)

            if (self.temporary):
                for flag in self.temporary_bool:
                    self.add_to_flags_bool(cfg, flag)
                for flag in self.temporary_param:
                    self.add_to_flags_param(cfg, flag, self.temporary_param)

        if 'apache' in self.tune:
            self.apache_flag_configuration = {}
            for flag in self.apache_flag_names:
                self.apache_flag_configuration[flag] = cfg[flag]

        if 'mysql' in self.tune:
            self.mysql_flag_configuration = {}
            for flag in self.mysql_flag_names:
                self.mysql_flag_configuration[flag] = cfg[flag]

        if 'tomcat' in self.tune:
            self.ajp_flag_configuration = {}
            for flag in self.ajp_flag_names:
                self.ajp_flag_configuration[flag] = cfg[flag]

        run_time = self.execute_program()
        if run_time != -1:
            temp_improvement = float(
                (self.default_metric - run_time) / self.default_metric)
            if temp_improvement >= self.improvement:
                self.improvement = temp_improvement
                self.call_program(
                    "cp result.dat ./TunedConfiguration/tuned.dat")
                self.append_to_config_file("Apache prefork Configuration : ")
                self.append_to_config_file(str(self.apache_flag_configuration))
                self.append_to_config_file("MySQL Configuration : ")
                self.append_to_config_file(str(self.mysql_flag_configuration))
                self.append_to_config_file("AJP Connector Configuration : ")
                self.append_to_config_file(str(self.ajp_flag_configuration))

                self.append_to_config_file("Tomcat JVM Configuration : ")
                self.append_to_config_file(self.flags)
                self.append_to_config_file("Improvement: %s" %
                                           self.improvement)
                self.append_to_config_file("Configuration Found At: %s" %
                                           datetime.datetime.now())
            return Result(time=run_time)
        else:
            return Result(state='ERROR', time=float('inf'))
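
add_to_flags_bool and add_to_flags_param are helpers on the tuner class used throughout this example and the next ones. A hedged sketch inferred from the -XX:+Flag / -XX:Flag=value strings built inline elsewhere in this code (an assumption, not the project's definition):

def add_to_flags_bool(self, cfg, flag):
    # Append -XX:+Flag or -XX:-Flag depending on the tuned on/off value.
    if cfg[flag] == 'on':
        self.flags += ' -XX:+{0}'.format(flag)
    elif cfg[flag] == 'off':
        self.flags += ' -XX:-{0}'.format(flag)

def add_to_flags_param(self, cfg, flag, param_dict):
    # Append -XX:Flag=value using the flag name recorded in the parameter dict.
    param_flag = param_dict[flag]['flagname']
    self.flags += ' -XX:' + param_flag + '=' + str(cfg[param_flag])
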
Code Example #14
 def report(self, **kwargs):
     """Report the performance of the most recent configuration."""
     if not self._converged:
         result = Result(**kwargs)
         self._results.put_nowait(result)
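
Here report() only enqueues the Result; presumably another thread drains the queue and forwards each entry to the tuning run manager, roughly like this (a sketch; `manager` and the pending DesiredResult are assumed to be tracked elsewhere):

result = self._results.get()  # blocks until report() enqueues a Result
manager.report_result(pending_dr, result)
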
Code Example #15
File: python_template.py  Project: LWetz/Final
def report_result(runtime):
    api.report_result(desired_result, Result(time=runtime))
Code Example #16
    def run(self, desired_result, input, limit):
        cfg = desired_result.configuration.data
        self.flags = ''

        # assign garbage collector flags
        if (self.gc):
            for flag in self.gc_select_flags:
                if cfg[flag] == 'on':
                    self.flags += ' -XX:+{0}'.format(flag)
                    # if serial gc only the common gc flags will be used
                    if flag == 'UseSerialGC':
                        break
                    # if g1 gc only use g1 related flags + common
                    elif flag == 'UseG1GC':
                        for gc_flag in self.g1_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.g1_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.g1_param)
                        break
                    elif flag == 'UseParallelGC':
                        for gc_flag in self.parallel_common_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.parallel_common_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.parallel_common_param)

                        for gc_flag in self.parallel_young_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.parallel_young_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.parallel_young_param)

                        # with parallel gc it is possible to use or not to use parallel old
                        for gc_flag in self.gc_select_flags:
                            if gc_flag == 'UseParallelOldGC':
                                if cfg[gc_flag] == 'on':
                                    self.flags += ' -XX:+{0}'.format(gc_flag)
                                    for parallel_old_flag in self.parallel_old_bool:
                                        self.add_to_flags_bool(
                                            cfg, parallel_old_flag)
                                    for parallel_old_flag in self.parallel_old_param:
                                        self.add_to_flags_param(
                                            cfg, parallel_old_flag,
                                            self.parallel_old_param)
                                elif cfg[gc_flag] == 'off':
                                    self.flags += ' -XX:-{0}'.format(gc_flag)
                        break

                    elif flag == 'UseParallelOldGC':
                        for gc_flag in self.parallel_common_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.parallel_common_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.parallel_common_param)

                        for gc_flag in self.parallel_young_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.parallel_young_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.parallel_young_param)

                        for parallel_old_flag in self.parallel_old_bool:
                            self.add_to_flags_bool(cfg, parallel_old_flag)
                        for parallel_old_flag in self.parallel_old_param:
                            self.add_to_flags_param(cfg, parallel_old_flag,
                                                    self.parallel_old_param)
                        break

                    elif flag == 'UseConcMarkSweepGC':
                        for gc_flag in self.cms_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.cms_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.cms_param)

                        for gc_flag in self.gc_select_flags:
                            if gc_flag == 'UseParNewGC':
                                if cfg[gc_flag] == 'on':
                                    self.flags += ' -XX:+{0}'.format(gc_flag)
                                    for parnew_flag in self.parnew_bool:
                                        self.add_to_flags_bool(
                                            cfg, parnew_flag)
                                    for parnew_flag in self.parnew_param:
                                        self.add_to_flags_param(
                                            cfg, parnew_flag,
                                            self.parnew_param)
                                elif cfg[gc_flag] == 'off':
                                    self.flags += ' -XX:-{0}'.format(gc_flag)
                        break

                    elif flag == 'UseParNewGC':
                        for gc_flag in self.parnew_bool:
                            self.add_to_flags_bool(cfg, gc_flag)
                        for gc_flag in self.parnew_param:
                            self.add_to_flags_param(cfg, gc_flag,
                                                    self.parnew_param)

                        for gc_flag in self.gc_select_flags:
                            if gc_flag == 'UseConcMarkSweepGC':
                                if cfg[gc_flag] == 'on':
                                    self.flags += ' -XX:+{0}'.format(gc_flag)
                                    for cms_flag in self.cms_bool:
                                        self.add_to_flags_bool(cfg, cms_flag)
                                    for cms_flag in self.cms_param:
                                        self.add_to_flags_param(
                                            cfg, cms_flag, self.cms_param)
                                elif cfg[gc_flag] == 'off':
                                    self.flags += ' -XX:-{0}'.format(gc_flag)
                        break

            #add common gc flags
            for gc_flag in self.gc_common_bool:
                self.add_to_flags_bool(cfg, gc_flag)
            for gc_flag in self.gc_common_param:
                self.add_to_flags_param(cfg, gc_flag, self.gc_common_param)

        # assign compilation flags
        if (self.compilation):
            for flag in self.compilation_bool:
                if cfg[flag] == 'on':
                    self.flags += ' -XX:+{0}'.format(flag)
                    # if tieredCompilation is on use tiered compilation + c1 compiler flags
                    if flag == 'TieredCompilation':
                        # use tiered compilation flags
                        for tiered_flag in self.tiered_compilation_bool:
                            self.add_to_flags_bool(cfg, tiered_flag)
                        for tiered_flag in self.tiered_compilation_param:
                            self.add_to_flags_param(
                                cfg, tiered_flag,
                                self.tiered_compilation_param)

                        # if compiler flags are used use client compiler flags
                        if (self.compiler):
                            for c1_flag in self.client_bool:
                                self.add_to_flags_bool(cfg, c1_flag)

                            for c1_flag in self.client_param:
                                self.add_to_flags_param(
                                    cfg, c1_flag, self.client_param)

                elif cfg[flag] == 'off':
                    self.flags += ' -XX:-{0}'.format(flag)

            for flag in self.compilation_param:
                self.add_to_flags_param(cfg, flag, self.compilation_param)

        # assign Compiler flags
        if (self.compiler):
            for flag in self.compiler_common_bool:
                self.add_to_flags_bool(cfg, flag)

            for flag in self.compiler_common_param:
                self.add_to_flags_param(cfg, flag, self.compiler_common_param)

            for flag in self.server_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.server_param:
                self.add_to_flags_param(cfg, flag, self.server_param)

        if (self.bytecode):
            for flag in self.bytecode_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.bytecode_param:
                self.add_to_flags_param(cfg, flag, self.bytecode_param)

        if (self.codecache):
            for flag in self.codecache_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.codecache_param:
                self.add_to_flags_param(cfg, flag, self.codecache_param)

        if (self.deoptimization):
            for flag in self.deoptimization_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.deoptimization_param:
                self.add_to_flags_param(cfg, flag, self.deoptimization_param)

        if (self.interpreter):
            for flag in self.interpreter_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.interpreter_param:
                self.add_to_flags_param(cfg, flag, self.interpreter_param)

        if (self.memory):
            for flag in self.memory_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.memory_param:
                self.add_to_flags_param(cfg, flag, self.memory_param)

        if (self.priorities):
            for flag in self.priorities_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.priorities_param:
                self.add_to_flags_param(cfg, flag, self.priorities_param)

        if (self.temporary):
            for flag in self.temporary_bool:
                self.add_to_flags_bool(cfg, flag)
            for flag in self.temporary_param:
                self.add_to_flags_param(cfg, flag, self.temporary_param)

        run_time = self.execute_program()
        temp_improvement = float(
            (self.default_metric - run_time) / self.default_metric)
        if temp_improvement >= self.improvement:
            self.improvement = temp_improvement
            self.append_to_config_file(self.flags)
            self.append_to_config_file("Improvement: %s" % self.improvement)
            self.append_to_config_file("Configuration Found At: %s" %
                                       datetime.datetime.now())

        return Result(time=run_time)
Code Example #17
def run(apis):
    #setup
    args = argparser.parse_args()
    parameter_space = generate.deserialize_parameter_space(args.params)
    arguments_file_name = args.args
    if os.path.isfile(arguments_file_name):
        arguments = generate.deserialize_arguments(arguments_file_name)
    else:
        arguments = parameter_space.get_default_arguments()

    for i in range(args.trials):
        print("{}/{}".format(i, args.trials))

        #create a new argument file
        test_arguments = copy.deepcopy(arguments)
        apis_to_run = []
        desired_results = []
        for api in apis:
            desired_result = api.get_next_desired_result()
            if not desired_result:
                continue
            desired_results.append(desired_result)
            for (parameter,
                 argument) in desired_result.configuration.data.items():
                parameter = parameter_space.parameters[parameter]
                if type(parameter) == generate.IntegerParameter:
                    test_arguments[parameter.name] = argument * parameter.step
                if type(parameter) == generate.BooleanParameter:
                    test_arguments[parameter.name] = argument
                if type(parameter) == generate.PowerOfTwoParameter:
                    test_arguments[parameter.name] = argument
            apis_to_run.append(api)

        if apis_to_run:
            #build with these arguments
            generate.serialize_arguments(test_arguments, arguments_file_name)
            terminal.make_clean("./")

            bench_tests = []
            command_list = []
            for api in apis_to_run:
                bench_test = benchs.all_benchs[
                    api.measurement_interface.benchmark]
                bench_tests.append(bench_test[0]())
                bench_tests[-1].setup(flagss=bench_test[1],
                                      attribute="%peak",
                                      args=arguments_file_name,
                                      remake=True,
                                      verbose=args.verbose)
                command_list += bench_tests[-1].get_command_list()

            #run with these arguments
            output_list = config.run(command_list, verbose=args.verbose)

            #return the results to the apis
            for api, desired_result, bench_test in zip(apis_to_run,
                                                       desired_results,
                                                       bench_tests):
                bench_test.parse_output_list(
                    output_list[:len(bench_test.get_command_list())])
                output_list = output_list[len(bench_test.get_command_list()):]
                result = Result(time=(100.0 / bench_test.get_result()))
                api.report_result(desired_result, result)

    #parse the best configurations
    best_arguments = copy.deepcopy(arguments)
    for api in apis:
        api.search_driver.process_new_results()
        for (parameter, argument) in api.get_best_configuration().items():
            parameter = parameter_space.parameters[parameter]
            if type(parameter) == generate.IntegerParameter:
                best_arguments[parameter.name] = argument * parameter.step
            if type(parameter) == generate.BooleanParameter:
                best_arguments[parameter.name] = argument
            if type(parameter) == generate.PowerOfTwoParameter:
                best_arguments[parameter.name] = argument
        api.finish()
    generate.serialize_arguments(best_arguments, arguments_file_name)
Code Example #18
    def run(self, desired_result, input, limit):
        #        print 'run function started'

        #self.set_tomcat()
        cfg = desired_result.configuration.data
        JvmFlagsTuner.lock.acquire()

        try:
            flags = ''
            for flag in self.gc_select_flags:
                if cfg[flag] == 'on':
                    flags += ' -XX:+{0}'.format(flag)
                    if (flag == 'UseSerialGC'):
                        break
                    elif (flag == 'UseParallelOldGC'
                          or flag == 'UseParallelGC'):
                        for gc_through_flag in self.gc_troughput_bool:
                            if cfg[gc_through_flag] == 'on':
                                flags += ' -XX:+{0}'.format(gc_through_flag)
                            elif cfg[gc_through_flag] == 'off':
                                flags += ' -XX:-{0}'.format(gc_through_flag)
                        for gc_through_flag in self.gc_throughput_param:
                            value = self.gc_throughput_param[gc_through_flag]
                            param_flag = value['flagname']
                            flags += ' -XX:' + param_flag + "=" + str(
                                cfg[param_flag])
                        break

                    elif (flag == 'UseConcMarkSweepGC'
                          or flag == 'UseParNewGC'):
                        for gc_flag in self.gc_cms_bool:
                            if cfg[gc_flag] == 'on':
                                flags += ' -XX:+{0}'.format(gc_flag)
                            elif cfg[gc_flag] == 'off':
                                flags += ' -XX:-{0}'.format(gc_flag)
                        for gc_flag in self.gc_cms_param:
                            value = self.gc_cms_param[gc_flag]
                            param_flag = value['flagname']
                            flags += ' -XX:' + param_flag + "=" + str(
                                cfg[param_flag])
                        break

                    elif (flag == 'UseG1GC'):
                        for gc_flag in self.g1_gc_bool:
                            if cfg[gc_flag] == 'on':
                                flags += ' -XX:+{0}'.format(gc_flag)
                            elif cfg[gc_flag] == 'off':
                                flags += ' -XX:-{0}'.format(gc_flag)
                        for gc_flag in self.g1_gc_param:
                            value = self.g1_gc_param[gc_flag]
                            param_flag = value['flagname']
                            flags += ' -XX:' + param_flag + "=" + str(
                                cfg[param_flag])
                        break

                elif cfg[flag] == 'off':
                    flags += ' -XX:-{0}'.format(flag)

            #set common gc flags
            for flag in self.gc_common_bool:
                if cfg[flag] == 'on':
                    flags += ' -XX:+{0}'.format(flag)
                elif cfg[flag] == 'off':
                    flags += ' -XX:-{0}'.format(flag)

            for flag_dict in self.gc_common_param:
                flag = self.gc_common_param[flag_dict]
                param_flag = flag['flagname']
                flags += ' -XX:' + param_flag + "=" + str(cfg[param_flag])

            #set jit compiler flags
            for flag in self.compiler_flags_bool:
                if cfg[flag] == 'on':
                    flags += ' -XX:+{0}'.format(flag)
                elif cfg[flag] == 'off':
                    flags += ' -XX:-{0}'.format(flag)

            for flag_dict in self.compiler_flags_param:
                flag = self.compiler_flags_param[flag_dict]
                param_flag = flag['flagname']
                flags += ' -XX:' + param_flag + "=" + str(cfg[param_flag])

            self.set_catalinash(flags)
            print('Catalina sh is set with flags.')
            run_command = 'sh ' + self.tomcat_bin_location + 'startup.sh'
            stdin, stdout, stderr = self.ssh.exec_command(run_command)
            time.sleep(self.sleep_time)

            #            run_command = 'ab -k -n 1000000 -c 149 http://'+self.remote_ip+':8080/sample/hello'
            run_result = self.call_program(self.ab_command)
            ab_bench_output = run_result['stdout']
            print('ab_bench_output defaultruntime = ', ab_bench_output)
            if len(ab_bench_output) > 0:
                m = re.search(r'Time per request(.*)\[ms\] \(mean\)',
                              ab_bench_output,
                              flags=re.DOTALL)
                ab_bench_time = '10000'
            else:
                print('Error running ab. In the run function.')
                sys.exit()
            if m:
                ab_bench_time = m.group(1)
            ab_bench_time = re.sub(':', '', ab_bench_time, flags=re.DOTALL)

            try:
                time_per_request = float(ab_bench_time)
            except:
                time_per_request = 10000
            print('time_per_request = ', time_per_request)
            time.sleep(self.sleep_time)

            if time_per_request == 10000:
                self.set_catalinash('')

            run_command = 'sh ' + self.tomcat_bin_location + 'shutdown.sh'
            stdin, stdout, stderr = self.ssh.exec_command(run_command)
            time.sleep(self.sleep_time)

        finally:
            JvmFlagsTuner.lock.release()

        improvement = (self.default_run_time -
                       time_per_request) * 100.0 / self.default_run_time
        if improvement >= self.improvement_best:
            self.improvement_best = improvement
            commandfile = open('configuration.txt', "a")
            commandfile.write('\n' + run_command)
            commandfile.write(flags)
            commandfile.write('Improvement:' + str(self.improvement_best) +
                              '\n')
            commandfile.close()
        print('Run function returned successfully. Time per request: ' +
              str(time_per_request))
        return Result(time=time_per_request)
Code Example #19
File: main.py  Project: BinTuner/Dev
    def run_precompiled(self, desired_result, input, limit, compile_result,
                        id):
        """
    Run a compile_result from compile() sequentially and return NCD
    """

        assert compile_result['returncode'] == 0

        try:

            print(GlobalOptLevel)
            #KC = (BasicBlockNumber*(BasicBlockNumber-1))/2

            #-------------------Kolmogorov complexity----------------

            fopen = open("tmp0.bin", 'r')
            P = fopen.read()
            fopen.close(
            )  #must close the file. The buffer may have influce on the result

            CP = len(lzma.compress(P))
            NCP = CP / (len(P) + 2 * (math.log(len(P))))
            #print len(P)
            #print CP
            #print NCP

            #-------------------Normalized Compression Distance-----

            fopen = open("tmp0.bin", 'r')
            gopen = open("O0.bin", 'r')
            P = fopen.read()
            O = gopen.read()
            fopen.close()
            gopen.close()

            ncBytesXY = len(lzma.compress(P + O))
            ncBytesX = len(lzma.compress(P))
            ncBytesY = len(lzma.compress(O))

            ncd = float(ncBytesXY - min(ncBytesX, ncBytesY)) / max(
                ncBytesX, ncBytesY)

            global VC_GlobalCMD

            #NCP = 0
            #NCD = 0
            #print ("--NCP:%s" % NCP)
            print("--NCD:%s" % ncd)

            global maximumValue  # max ncd value
            global currentValue  # current ncd value
            global countValue  # set up iteration times
            print "---Test----"

            print("--Max:%s" % maximumValue)
            print("--Current:%s" % currentValue)
            print("--Count:%s" % countValue)

            if maximumValue != 0:
                currentValue = ncd
                if currentValue > maximumValue:
                    # A new maximum that improves by less than 5% still counts
                    # toward the stagnation limit; a larger jump resets it.
                    if (currentValue - maximumValue) < maximumValue * 0.05:
                        countValue += 1
                        print "---+1 1--"
                        if countValue > 10:
                            print "---over-1--"
                            os._exit(0)
                    else:
                        countValue = 0
                    maximumValue = currentValue
                else:
                    countValue += 1
                    print "---+1 2--"
                    if countValue > 10:
                        print "---over-2--"
                        os._exit(0)

            else:
                maximumValue = ncd  # first iteration: seed the running maximum

        finally:
            print "-------------------------------------------------"
            #self.call_program('rm ./tmp{0}.bin'.format(id))
        #print format(id)
        #return Result(time=run_result['time'])
        return Result(time=0, NCD=ncd, CMD=VC_GlobalCMD)
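The distance computed above is the Normalized Compression Distance, NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), where C(.) is the compressed length. A minimal self-contained sketch of the same formula (Python 3; separate from the BinTuner code):

import lzma

def ncd(x, y):
    """Normalized Compression Distance from LZMA compressed lengths."""
    cx, cy = len(lzma.compress(x)), len(lzma.compress(y))
    cxy = len(lzma.compress(x + y))
    return (cxy - min(cx, cy)) / max(cx, cy)

# Similar inputs compress well together (NCD near 0); unrelated inputs do not
# (NCD near 1).
a = b'the quick brown fox jumps over the lazy dog' * 100
noise = bytes(i % 251 for i in range(4096))
print(ncd(a, a))      # close to 0.0
print(ncd(a, noise))  # close to 1.0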
Code example #20
	def run(self, desired_result, input, limit):
		cfg = desired_result.configuration.data

		if self.args.window is None:
			window_param = cfg['window']
		else:
			window_param = self.args.window
		if self.args.interval is None:
			interval_param = cfg['interval']
		else:
			interval_param = self.args.interval
		if self.args.sketch_size is None:
			sketch_size_param = cfg['sketch-size']
		else:
			sketch_size_param = self.args.sketch_size
		if self.args.k_hops is None:
			k_hops_param = cfg['k-hops']
		else:
			k_hops_param = self.args.k_hops
		if self.args.chunk_size is None:
			chunk_size_param = cfg['chunk-size']
		else:
			chunk_size_param = self.args.chunk_size
		if self.args.lambda_param is None:
			lambda_param = cfg['lambda-param']
		else:
			lambda_param = self.args.lambda_param

		print "Configuration: "
		print "\t\t Window: " + str(window_param)
		print "\t\t Interval: " + str(interval_param)
		print "\t\t Sketch Size: " + str(sketch_size_param)
		print "\t\t K Hops: " + str(k_hops_param)
		print "\t\t Chunk Size: " + str(chunk_size_param)
		print "\t\t Lambda: " + str(lambda_param)

		# Compile the GraphChi streaming binary; the sketch size and K hops are
		# baked in at compile time via -D macros.
		gcc_cmd = 'g++ -std=c++11 -g -O3 -I/usr/local/include/ -I../graphchi-cpp/src/ -fopenmp -Wall -Wno-strict-aliasing -lpthread'
		gcc_cmd += ' -DSKETCH_SIZE=' + str(sketch_size_param)
		gcc_cmd += ' -DK_HOPS=' + str(k_hops_param)
		gcc_cmd += ' -DDEBUG -DPREGEN=10000 -DMEMORY=1 -g -I../graphchi-cpp/streaming/ ../graphchi-cpp/streaming/main.cpp -o ../graphchi-cpp/bin/streaming/main -lz'

		compile_result = self.call_program(gcc_cmd)
		assert compile_result['returncode'] == 0

		prog = re.compile(r"\.txt[\._]")	# matches generated files whose names contain '.txt.' or '.txt_'

		# Run every training and test graph of the same experiment with the same hyperparameters.
		train_base_dir_name = self.args.base_folder_train	# directory of the base training graphs (user input)
		train_base_files = sortfilenames(os.listdir(train_base_dir_name))
		train_stream_dir_name = self.args.stream_folder_train	# directory of the streaming part of the training graphs (user input)
		train_stream_files = sortfilenames(os.listdir(train_stream_dir_name))
		train_sketch_dir_name = self.args.sketch_folder_train	# directory in which to save the training graph sketches

		test_base_dir_name = self.args.base_folder_test	# directory of the base test graphs (user input)
		test_base_files = sortfilenames(os.listdir(test_base_dir_name))
		test_stream_dir_name = self.args.stream_folder_test	# directory of the streaming part of the test graphs (user input)
		test_stream_files = sortfilenames(os.listdir(test_stream_dir_name))
		test_sketch_dir_name = self.args.sketch_folder_test	# directory in which to save the test graph sketches

		for i in range(len(train_base_files)):
			train_base_file_name = os.path.join(train_base_dir_name, train_base_files[i])
			train_stream_file_name = os.path.join(train_stream_dir_name, train_stream_files[i])
			train_sketch_file = 'sketch-' + str(i) + '.txt'
			train_sketch_file_name = os.path.join(train_sketch_dir_name, train_sketch_file)

			run_cmd = '../graphchi-cpp/bin/streaming/main filetype edgelist'
			run_cmd += ' file ' + train_base_file_name
			run_cmd += ' niters 100000'
			run_cmd += ' stream_file ' + train_stream_file_name
			run_cmd += ' decay 500'
			run_cmd += ' lambda ' + str(lambda_param)
			run_cmd += ' window ' + str(window_param)
			run_cmd += ' interval ' + str(interval_param)
			run_cmd += ' sketch_file ' + train_sketch_file_name
			run_cmd += ' chunkify 1 '
			run_cmd += ' chunk_size ' + str(chunk_size_param)

			print run_cmd
			run_result = self.call_program(run_cmd)
			assert run_result['returncode'] == 0

			# clean up after every training graph is run
			for file_name in os.listdir(train_base_dir_name):
				file_path = os.path.join(train_base_dir_name, file_name)
				if re.search(prog, file_path):
					try:
						if os.path.isfile(file_path):
							os.unlink(file_path)
						elif os.path.isdir(file_path):
							shutil.rmtree(file_path)
					except Exception as e:
						print(e)

		for i in range(len(test_base_files)):
			test_base_file_name = os.path.join(test_base_dir_name, test_base_files[i])
			test_stream_file_name = os.path.join(test_stream_dir_name, test_stream_files[i])
			if "attack" in test_base_file_name:
				test_sketch_file = 'sketch-attack-' + str(i) + '.txt'
			else:
				test_sketch_file = 'sketch-' + str(i) + '.txt'
			test_sketch_file_name = os.path.join(test_sketch_dir_name, test_sketch_file)

			run_cmd = '../graphchi-cpp/bin/streaming/main filetype edgelist'
			run_cmd += ' file ' + test_base_file_name
			run_cmd += ' niters 100000'
			run_cmd += ' stream_file ' + test_stream_file_name
			run_cmd += ' decay 500'
			run_cmd += ' lambda ' + str(lambda_param)
			run_cmd += ' window ' + str(window_param)
			run_cmd += ' interval ' + str(interval_param)
			run_cmd += ' sketch_file ' + test_sketch_file_name
			run_cmd += ' chunkify 1 '
			run_cmd += ' chunk_size ' + str(chunk_size_param)

			print run_cmd
			run_result = self.call_program(run_cmd)
			assert run_result['returncode'] == 0

			# clean up after every test graph is run
			for file_name in os.listdir(test_base_dir_name):
				file_path = os.path.join(test_base_dir_name, file_name)
				if re.search(prog, file_path):
					try:
						if os.path.isfile(file_path):
							os.unlink(file_path)
						elif os.path.isdir(file_path):
							shutil.rmtree(file_path)
					except Exception as e:
						print(e)

		# Note that we read every file within @train_sketch_dir_name and @test_sketch_dir_name
		# without error checking, so make sure every file in those directories is valid.
		sketch_train_files = sortfilenames(os.listdir(train_sketch_dir_name))
		train_sketches, train_targets = load_sketches(sketch_train_files, train_sketch_dir_name, sketch_size_param)
		sketch_test_files = sortfilenames(os.listdir(test_sketch_dir_name))
		test_sketches, test_targets = load_sketches(sketch_test_files, test_sketch_dir_name, sketch_size_param)
		# We generate the models once and reuse them across all cross-validation folds.
		all_models = model_all_training_graphs(train_sketches, train_targets, sketch_size_param)

		print "We will perform " + str(NUM_CROSS_VALIDATION) + "-fold cross validation..."
		threshold_metric_config = ['mean', 'max']
		num_stds_config = [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0]
		# We record the average result across all folds.
		# We use (true negatives)/(total normal graphs) as our accuracy metric, since minimizing false positives is the current concern.
		average_accuracy = 0.0
		# final_printout = ""
		# final_precision = None
		# final_recall = None
		# final_f = None

		# kf = KFold(n_splits=NUM_CROSS_VALIDATION)
		kf = ShuffleSplit(n_splits=NUM_CROSS_VALIDATION, test_size=0.2, random_state=0)
		for benign_train, benign_validate in kf.split(train_targets):
			benign_validate_sketches, benign_validate_names = train_sketches[benign_validate], train_targets[benign_validate]
			kf_test_sketches = np.concatenate((test_sketches, benign_validate_sketches), axis=0)
			kf_test_targets = np.concatenate((test_targets, benign_validate_names), axis=0)
		
			# Modeling (training)
			models = []
			for index in benign_train:
				models.append(all_models[index])

			# Testing
			print "We will attempt multiple cluster threshold configurations for the best results."
			print "Trying: mean/max distances with 1.0, 1,1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0 standard deviation(s)..."
			for tm in threshold_metric_config:
				for ns in num_stds_config:
					tn, tp, fn, fp, total_normal_graphs, total_graphs, recall, precision, accuracy, f_measure, printout  = test_all_graphs(kf_test_sketches, kf_test_targets, sketch_size_param, models, tm, ns)
					print "Threshold metric: " + tm
					print "Number of standard deviations: " + str(ns)
					print "TP: " + str(tp) + "\t TN: " + str(tn) + "\t FP: " + str(fp) + "\t FN: " + str(fn)
					print "Test accuracy: " + str(accuracy)
					print "Test Precision: " + str(precision)
					print "Test Recall: " + str(recall)
					print "Test F-1 Score: " + str(f_measure)
					print "Results: "
					print printout

			# This value currently has no real significance; it exists only so that OpenTuner has an oracle.
			test_accuracy = float(tn) / total_normal_graphs	# TODO: currently we are concerned only with FPs.
			average_accuracy = average_accuracy + test_accuracy
		average_accuracy = average_accuracy / NUM_CROSS_VALIDATION

		# For next experiment, remove sketch files from this experiment
		for sketch_train_file in sketch_train_files:
			file_to_remove = os.path.join(train_sketch_dir_name, sketch_train_file)
			try:
				if os.path.isfile(file_to_remove):
					os.unlink(file_to_remove)
			except Exception as e:
				print(e)
		for sketch_test_file in sketch_test_files:
			file_to_remove = os.path.join(test_sketch_dir_name, sketch_test_file)
			try:
				if os.path.isfile(file_to_remove):
					os.unlink(file_to_remove)
			except Exception as e:
				print(e)

		return Result(time=1.0, accuracy=average_accuracy)
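The fold construction above follows a one-class pattern: models are trained only on benign graphs, and each fold's test set is the fixed attack set pooled with the held-out benign validation graphs. A toy sketch of that pattern (Python 3; the random matrices stand in for the real graph sketches):

import numpy as np
from sklearn.model_selection import ShuffleSplit

train_sketches = np.random.rand(10, 4)   # stand-ins for benign training sketches
train_targets = np.array(['benign-%d' % i for i in range(10)])
test_sketches = np.random.rand(3, 4)     # stand-ins for attack test sketches
test_targets = np.array(['attack-%d' % i for i in range(3)])

kf = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
for benign_train, benign_validate in kf.split(train_targets):
    # Held-out benign graphs join the attack graphs to form this fold's test set.
    fold_x = np.concatenate((test_sketches, train_sketches[benign_validate]), axis=0)
    fold_y = np.concatenate((test_targets, train_targets[benign_validate]), axis=0)
    print(len(benign_train), 'training graphs,', len(fold_y), 'test graphs')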
Code example #21
    def report_error(self, result_id, state, msg):
        "Put an error message in the log and create a Result object with the error."

        log.error('Configuration %d failed with error: %s (%s)', result_id,
                  state, msg)
        return Result(state=state, msg=msg)
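A hypothetical call site for this helper (a sketch only; build_cmd and the surrounding run() method are stand-ins, not from the original project) shows how every failed measurement can funnel through one logging path:

    def run(self, desired_result, input, limit):
        # build_cmd is a hypothetical helper turning a configuration into a shell command.
        run_result = self.call_program(build_cmd(desired_result.configuration.data))
        if run_result['returncode'] != 0:
            return self.report_error(desired_result.id, 'ERROR', run_result['stderr'])
        return Result(time=run_result['time'])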
Code example #22
def tuning_loop():
    report_delay = 30
    last_time = time.time()
    start_time = last_time
    iterations = 5
    parser = argparse.ArgumentParser(parents=opentuner.argparsers())

    parser.add_argument("--processes",
                        type=int,
                        help="Number of Python threads available.")
    parser.add_argument(
        "--no-wait",
        action="store_true",
        help="Do not wait for requested results to generate more requests.")
    parser.add_argument("--application", type=str, help="Application name.")
    parser.add_argument("--verilog-file",
                        type=str,
                        help="Verilog file for the application.")

    args = parser.parse_args()
    pool = ThreadPool(args.processes)
    manipulator = ConfigurationManipulator()

    global application
    global verilog_file
    global application_path
    global container_path
    global host_path
    global image_name
    global script_name

    global tuning_init

    application = args.application
    verilog_file = args.verilog_file
    application_path = "/root/legup_src/legup-4.0/examples/chstone/{0}".format(
        application)
    container_path = "/root/legup_src/legup-4.0/examples/chstone/{0}/tuner".format(
        application)
    host_path = "/home/bruelp/legup-tuner/post_place_and_route/py"
    image_name = "legup_quartus"
    script_name = "measure.sh"

    print(application, container_path, application_path)

    for name in legup_parameters.parameters:
        parameter_type = legup_parameters.parameter_type(name)
        values = legup_parameters.parameter_values(name)
        if parameter_type == int:
            manipulator.add_parameter(
                IntegerParameter(name, values[0], values[1]))
        elif parameter_type == bool:
            manipulator.add_parameter(BooleanParameter(name))
        elif parameter_type == Enum:
            manipulator.add_parameter(EnumParameter(name, values))
        else:
            print("ERROR: Unsupported parameter type {0} for parameter \"{1}\"".format(
                parameter_type, name))

    interface = DefaultMeasurementInterface(args=args,
                                            manipulator=manipulator,
                                            project_name='HLS-FPGAs',
                                            program_name='legup-tuner',
                                            program_version='0.0.1')

    manager = TuningRunManager(interface, args)

    current_time = time.time()
    computing_results = []
    computed_results = []
    desired_results = manager.get_desired_results()

    while current_time - start_time < args.stop_after:
        if args.no_wait:
            if len(desired_results) != 0 or len(computing_results) != 0:
                for desired_result in desired_results:
                    computing_results.append([
                        desired_result,
                        pool.apply_async(get_wallclock_time,
                                         (desired_result.configuration.data, ))
                    ])

                for result in computing_results:
                    if result[1].ready() and result[0] not in computed_results:
                        cost = result[1].get()
                        manager.report_result(result[0], Result(time=cost))
                        computed_results.append(result)

                for result in computed_results:
                    if result in computing_results:
                        computing_results.remove(result)

                computed_results = []
        else:
            if len(desired_results) != 0:
                cfgs = [dr.configuration.data for dr in desired_results]
                results = pool.map_async(get_wallclock_time,
                                         cfgs).get(timeout=None)

                for dr, result in zip(desired_results, results):
                    manager.report_result(
                        dr,
                        Result(time=result['value'],
                               cycles=result['cycles'],
                               fmax=result['fmax'],
                               LU=result['lu'],
                               pins=result['pins'],
                               regs=result['regs'],
                               block=result['block'],
                               ram=result['ram'],
                               dsp=result['dsp']))

        desired_results = manager.get_desired_results()

        current_time = time.time()

        if (current_time - last_time) >= report_delay:
            log_intermediate(current_time - start_time, manager)
            last_time = current_time

    current_time = time.time()
    log_intermediate(current_time - start_time, manager)

    save_final_configuration(manager.get_best_configuration())
    manager.finish()
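The no-wait branch above is a simple asynchronous harvest loop: every desired result is submitted to the thread pool, and each pass sweeps the in-flight futures for finished ones. A stripped-down sketch of the same pattern (Python 3; fake_measure is a stand-in for get_wallclock_time):

import random
import time
from multiprocessing.pool import ThreadPool

def fake_measure(cfg):
    time.sleep(random.uniform(0.1, 0.5))  # pretend to run place-and-route
    return sum(cfg.values())              # pretend cost

pool = ThreadPool(4)
pending = [(i, pool.apply_async(fake_measure, ({'x': i},))) for i in range(8)]
done = []
while pending:
    still_pending = []
    for job_id, async_result in pending:
        if async_result.ready():
            # In the real loop this is where manager.report_result() is called.
            done.append((job_id, async_result.get()))
        else:
            still_pending.append((job_id, async_result))
    pending = still_pending
    time.sleep(0.1)
print(sorted(done))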