Example #1
 def benchmark_kernbench(self):
     self.benchmark = list()
     with urllib.request.urlopen(self.log_url + "/" +
                                 self.testcase) as page:
         g = io.BufferedReader(page)
         t = io.TextIOWrapper(g, 'utf-8')
         pattern1 = re.compile(r'^Elapsed Time *(\d*)')
         pattern2 = re.compile(r'^Context Switches *(\d*)')
         pattern3 = re.compile(r'^Half load -j (\d*) run number')
         pattern4 = re.compile(r'^Optimal load -j (\d*) run number')
         for line in t:
             m1 = pattern1.match(line)
             m2 = pattern2.match(line)
             m3 = pattern3.match(line)
             m4 = pattern4.match(line)
             if m3:
                 jobs = str(m3.group(1))
                 continue
             if m4:
                 jobs = str(m4.group(1))
                 continue
             if m1:
                 #print(m1.group(1))
                 self.benchmark.append(
                     benchmark('Jobs' + jobs + '/' + 'Elapsed_Time',
                               float(m1.group(1)), -1))
                 continue
             if m2:
                 #print(m2.group(1))
                 self.benchmark.append(
                     benchmark('Jobs' + jobs + '/' + 'Context_Switch',
                               float(m2.group(1)), 1))
                 continue
Example #2
def main():

    benchmark.benchmark(
        get_X_y=functools.partial(stream.iter_sklearn_dataset,
                                  datasets.load_boston()),
        n=506,
        get_pp=preprocessing.StandardScaler,
        models=[
            ('creme', 'LinReg',
             linear_model.LinearRegression(optimizer=optim.VanillaSGD(0.01),
                                           l2=0.)),
            ('creme', 'GLM',
             linear_model.GLMRegressor(optimizer=optim.VanillaSGD(0.01),
                                       l2=0.)),
            ('creme', 'GLM detrend',
             meta.Detrender(
                 linear_model.GLMRegressor(optimizer=optim.VanillaSGD(0.01),
                                           l2=0.,
                                           intercept_lr=0.))),
            ('sklearn', 'SGD',
             compat.CremeRegressorWrapper(
                 sklearn_estimator=sk_linear_model.SGDRegressor(
                     learning_rate='constant',
                     eta0=0.01,
                     fit_intercept=True,
                     penalty='none'), )),
        ],
        get_metric=metrics.MSE)
Example #3
def generate_candidate_xml(dataset, mode='test'):
    infile = dataset + "/" + mode + "_candidate.txt"
    save_file = dataset + "/" + mode + "_candidate.xml"
    terminology = dataset + "/TERMINOLOGY.txt"
    if mode == 'test':
        dev_size = 0
    else:
        dev_size = 500
    g_file(infile, save_file, terminology, dev_size)
    benchmark(infile, terminology)
Example #4
File: rle.py Project: arturh85/PongClock
def benchmark_1px(com):
    bmp = Bitmap()
    bmp.setPixel(1, 1)
    with benchmark("arduino_0x02") as b2:
        bmp.arduino_0x02(com)
    with benchmark("arduino_0x03") as b3:
        bmp.arduino_0x03(com)
    with benchmark("arduino_0x04") as b4:
        bmp.arduino_0x04(com)

    print(b2.time)
    print(b3.time)
    print(b4.time)
Example #5
 def __call__(self, f1, f2, f3, ww1, ww2, ww3):
     #breakpoint()
     #load csv
     stock, bond, USD, stock_i, bond_i, USD_i, date = strategy.strategy(
         f1, f2, f3)
     #weighting
     sharp_ratio, sortino_ratio, excess, var, EoVaR, prof = benchmark.benchmark(
         ww1=ww1,
         ww2=ww2,
         ww3=ww3,
         stock=stock,
         bond=bond,
         USD=USD,
         stock_i=stock_i,
         bond_i=bond_i,
         USD_i=USD_i,
         date=date)
     out_dict = {
         "fund_name": "first_fund",
         "sharp": sharp_ratio,
         "sortino": sortino_ratio,
         "excess": excess,
         "VAR": var,
         "E on VaR": EoVaR,
         "history": [{
             "date": date,
             "unrealized gains": list(prof)
         }]
     }
     print(out_dict)
     return out_dict
Example #6
 def remove(self):
     """wraps given remove function"""
     if self.validate():
         self.users, time = benchmark(
             lambda: self._remove(self.users, self.minimum, self.maximum))
         self.varRemoveCost.set("REMOVING cost: {0:.4}ms".format(1000 *
                                                                 time))
Example #7
 def add(self):
     """wraps given add function"""
     self.users, time = benchmark(
         lambda: self._add(self.users, self._parser))
     self.varAddCost.set("ADDING cost: {0:.4}ms".format(
         1000 * time))  # up to 4 decimals
     self.update_info()
Example #8
def evaluate(db, config, benchmarks, options):

    total_runtime = 0

    print('Evaluating ' + ' '.join('%s=%s' % i for i in options.items()),
          end='')
    stdout.flush()

    for b in benchmarks:
        compile_for.compile(db, config, override_options=options)

        results = asyncio.get_event_loop().run_until_complete(
            benchmark.benchmark(db, config, b, False))

        # Sum up results
        total_runtime += sum(
            min(
                float(r['value']) for r in reporter_results
                if r['name'] == 'PAPI_REAL_NSEC')
            for (reporter_results, _) in results)

        print('.', end='')
        stdout.flush()

    print(' Result: %.2e' % total_runtime)
    return total_runtime
Example #9
 def search(self):
     """wraps given search function"""
     if self.validate():  # if no exceptions are thrown, i.e. valid input
         self.result, time = benchmark(
             lambda: self._search(self.users, self.minimum, self.maximum))
         self.varSearchCost.set("SEARCHING cost: {0:.4}ms".format(1000 *
                                                                  time))
         self.showNext()  # show the first result
Example #10
def main():
    def add_hour(x):
        x['hour'] = x['moment'].hour
        return x

    benchmark.benchmark(
        get_X_y=datasets.fetch_bikes,
        n=182470,
        get_pp=lambda:
        (compose.Whitelister('clouds', 'humidity', 'pressure', 'temperature',
                             'wind') +
         (add_hour | feature_extraction.TargetAgg(by=['station', 'hour'],
                                                  how=stats.Mean())
          ) | preprocessing.StandardScaler()),
        models=[
            # ('creme', 'LinReg', linear_model.LinearRegression(
            #     optimizer=optim.VanillaSGD(0.01),
            #     l2=0.
            # )),
            ('creme', 'GLM',
             linear_model.GLMRegressor(optimizer=optim.VanillaSGD(0.01),
                                       l2=0.)),
            ('creme', 'GLM',
             meta.Detrender(
                 linear_model.GLMRegressor(optimizer=optim.VanillaSGD(0.01),
                                           l2=0.))),

            # ('sklearn', 'SGD', compat.CremeRegressorWrapper(
            #     sklearn_estimator=sk_linear_model.SGDRegressor(
            #         learning_rate='constant',
            #         eta0=0.01,
            #         fit_intercept=True,
            #         penalty='none'
            #     ),
            # )),
            # ('sklearn', 'SGD no intercept', compat.CremeRegressorWrapper(
            #     sklearn_estimator=sk_linear_model.SGDRegressor(
            #         learning_rate='constant',
            #         eta0=0.01,
            #         fit_intercept=False,
            #         penalty='none'
            #     ),
            # )),
        ],
        get_metric=metrics.MSE)
Example #11
 def benchmark_reaim_ioperf(self):
     self.benchmark = list()
     pattern = re.compile(rb'Max Jobs per Minute ([0-9].*\.*[0-9]*)')
     for line in urllib.request.urlopen(self.log_url + "/" + self.testcase):
         m = pattern.match(line)
         if m:
             value = str(m.group(1), 'utf-8')
             self.benchmark.append(benchmark('Jobs_per_Minute', value, 1))
             continue
Example #12
 def benchmark_dbench4(self):
     self.benchmark = list()
     with urllib.request.urlopen(self.log_url + "/" +
                                 self.testcase) as page:
         g = io.BufferedReader(page)
         t = io.TextIOWrapper(g, 'utf-8')
         pattern1 = re.compile(
             r'^Throughput (\d+\.*\d*) MB/sec  (\d+\.*\d*) clients  (\d+\.*\d*) procs  max_latency=(\d+\.*\d*) ms'
         )
         for line in t:
             m1 = pattern1.match(line)
             if m1:
                 self.benchmark.append(
                     benchmark(
                         "{} processes Throughput(MB/sec)".format(
                             m1.group(3)), m1.group(1), 1))
                 self.benchmark.append(
                     benchmark(
                         "{} processes max_latency(ms)".format(m1.group(3)),
                         m1.group(4), -1))
Example #13
def train_agent(training_agent,
                experience_agent_a,
                experience_agent_b,
                benchmark_agent,
                epoch_size,
                epochs,
                verbose=0):
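    # NOTE: during_training_benchmark_size, during_training_3perc_p_value and
    # final_benchmark_size are presumably module-level constants defined
    # outside this excerpt.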
    value_fct = isinstance(training_agent, ActorCritic)
    encoder = Encoder()
    batch_player = BatchPlayer(agent1=experience_agent_a,
                               agent2=experience_agent_b,
                               no_of_moves=epoch_size,
                               verbose=verbose)

    print("Starting training...")
    for i in range(epochs):
        states, wins, moves, advantages = batch_player.play_games()
        gen = BatchGenerator(encoder=encoder,
                             states=states,
                             wins=wins,
                             moves=moves,
                             value_fct=advantages,
                             epoch_size=epoch_size,
                             output_includes_value_fct=value_fct)

        training_agent.train(generator=gen, verbose=verbose)

        win1, win2 = benchmark.benchmark(training_agent,
                                         benchmark_agent,
                                         during_training_benchmark_size,
                                         verbose=verbose)

        if win1 >= during_training_3perc_p_value:
            training_agent.save_weights()
            experience_agent_b.load_weights()

    training_agent.load_weights()
    benchmark.benchmark(training_agent,
                        benchmark_agent,
                        n=final_benchmark_size,
                        verbose=verbose)
Example #14
 def benchmark_bonniepp(self):
     """
     Create a benchmark attribute for each Testcase object.
     This attribute will be used to compare subsystems.
     This function effectively acts as a parser for the test log.
     """
     self.benchmark = list()
     pattern = re.compile('Machine .*Size')
     with urllib.request.urlopen(self.log_url + "/" +
                                 self.testcase) as page:
         g = io.BufferedReader(page)
         t = io.TextIOWrapper(g, 'utf-8')
         for line in t:
             m = pattern.match(line)
             if m:
                 line = next(t).split()
                 self.benchmark.append(
                     benchmark('Sequential_Output#Per_char#K/s', line[2],
                               1))
                 self.benchmark.append(
                     benchmark('Sequential_Output#Block#K/s', line[4], 1))
                 self.benchmark.append(
                     benchmark('Sequential_Output#Rewrite#K/s', line[6], 1))
                 self.benchmark.append(
                     benchmark('Sequential_Input#Per_char#K/s', line[8], 1))
                 self.benchmark.append(
                     benchmark('Sequential_Input#Block#K/s', line[10], 1))
                 self.benchmark.append(
                     benchmark('Random#Seeks#sec', line[12], -1))
                 break
Example #15
def main():
    """
    Main function that benchmarks solutions
    """
    max_size = 22
    sample_pricing = [randint(1, 2000) for _ in range(40)]

    print("Benchmarking recursive cutting with list of prices:")
    benchmark(recursive_cutting, [sample_pricing], range(1, max_size + 1))

    """
    Simple LRU caching does not help this problem. I have commented this out to
    speed up the benchmarking. Bottom-up memoization is way faster.
    
    print("Benchmarking recursive cutting with list of prices LRU Cached:")
    sample_pricing = tuple(randint(1, 2000) for _ in range(40))
    benchmark(recursive_cutting_lru_cached, [sample_pricing], range(1, max_size + 1))
    """
    
    print("Benchmarking recursive cutting with top-down memoization:")
    sample_pricing = tuple(randint(1, 2000) for _ in range(40))
    benchmark(rod_cutting_memoized_top_down, [sample_pricing], range(1, max_size + 1))

    print("Benchmarking recursive cutting with bottom-up memoization:")
    sample_pricing = tuple(randint(1, 2000) for _ in range(40))
    benchmark(rod_cutting_memoized_bottom_up, [sample_pricing], range(1, max_size + 1))
Example #16
def main():

    x = []
    for n in range(3, 9 + 1):
        x.append(2**n)

    time_cp = []
    time_devito_gpu = []
    time_gt4py_gpu = []
    time_gt4py_cpu = []

    print('time GPU')
    for nx in x:
        print(f'nx = {nx}')
        timing_gpu = benchmark(nx, gpu=True)
        time_cp.append(timing_gpu[0])
        time_devito_gpu.append(timing_gpu[1])
        time_gt4py_gpu.append(timing_gpu[2])
        time_gt4py_cpu.append(timing_gpu[3])

    fig, ax = plt.subplots(figsize=(10, 6))
    ax.tick_params(direction='in')
    ax.set(xlabel='grid size',
           ylabel='time [s]',
           title='timing 3d heat equation on GPU')
    ax.set_xscale('log', basex=2)
    ax.set_yscale('log')
    plt.plot(x, time_cp, label='cupy')
    plt.plot(x, time_devito_gpu, label='devito_gpu')
    plt.plot(x, time_gt4py_gpu, label='gt4py_gpu')
    #plt.plot(x, time_gt4py_cpu, label='gt4py_cpu')
    ax.legend()
    fig.savefig('gpu.png')

    fig, ax = plt.subplots(figsize=(10, 6))
    ax.tick_params(direction='in')
    ax.set(xlabel='grid size',
           ylabel='time [s]',
           title='timing 3d heat equation on GPU')
    ax.set_xscale('log', basex=2)
    plt.plot(x, time_cp, label='cupy')
    plt.plot(x, time_devito_gpu, label='devito_gpu')
    plt.plot(x, time_gt4py_gpu, label='gt4py_gpu')
    #plt.plot(x, time_gt4py_cpu, label='gt4py_cpu')

    ax.legend()
    fig.savefig('gpu_comp.png')
Example #17
def main(argv):
    git_repo = GIT_DIR
    git_origin = GITHUB_URL
    src_dir = SRC_DIR
    qmake_file = QMAKE_FILE
    testimages = TESTIMAGES
    testsizes = TESTSIZES
    binary = BINARY
    branches = BRANCHES
    images = []
    for img_prefix in testimages:
        imgs = map(lambda l: img_prefix + "_" + l + ".png", testsizes)
        images += imgs

    try:
        opts, remainder = getopt.getopt(argv, "d:", ["dir="])
    except getopt.GetoptError:
        print "Error in passed parameters"
    else:
        if opts:
            opt, git_dir = opts[0]
            src_dir = git_dir + "/src/"
            images = map(lambda l: git_dir + "/" + IMAGE_DIR + "/" + l, images)
            r = benchmark.benchmark_local(src_dir=src_dir,
                                          images=images,
                                          qmake_file=qmake_file,
                                          binary=binary)
        else:
            print 'In case you want to specify an existing source directory, use "main.py -d <git_dir>" or "main.py --dir=<git_dir>"'
            images = map(lambda l: git_repo + "/" + IMAGE_DIR + "/" + l,
                         images)
            r = benchmark.benchmark(git_repo=git_repo,
                                    src_dir=src_dir,
                                    branches=branches,
                                    git_origin=git_origin,
                                    images=images,
                                    qmake_file=qmake_file,
                                    binary=binary,
                                    clone_url=CLONE_URL)
    print r

    from datetime import datetime
    f = open(
        "run_" + benchmark.computer_name() + "_" +
        datetime.now().strftime('%y_%m_%d__%H_%M') + '.pickle', 'wb')
    pickle.dump(r, f)
Example #18
def main():
    
    x = []
    for n in range(3, 9+1):
        x.append(2 ** n)

    time_np = []
    time_devito = []
    time_gt4py = []
    
    print('time CPU')
    for nx in x:
        print(f'nx = {nx}')    
        timing = benchmark(nx)
        time_np.append(timing[0])
        time_devito.append(timing[1])
        print(timing[1])
        time_gt4py.append(timing[2])  
 
    fig, ax = plt.subplots(figsize=(10,6))
    ax.tick_params(direction='in')
    ax.set(xlabel='grid size', ylabel='time [s]',
           title='timing 3d heat equation on CPU')
    ax.set_xscale('log', basex=2)
    ax.set_yscale('log')
    plt.plot(x, time_np, label='numpy')
    plt.plot(x, time_devito, label='devito')
    plt.plot(x, time_gt4py, label='gt4py')
    
    ax.legend()
    fig.savefig('cpu.png')
    
    fig, ax = plt.subplots(figsize=(10,6))
    ax.tick_params(direction='in')
    ax.set(xlabel='grid size', ylabel='time [s]',
           title='timing 3d heat equation on CPU')
    ax.set_xscale('log', basex=2)
    plt.plot(x, time_np, label='numpy')
    plt.plot(x, time_devito, label='devito')
    plt.plot(x, time_gt4py, label='gt4py')
    
    ax.legend()
    fig.savefig('cpu_comp.png')
Example #19
def olsf_stream(data, dataset_name):

    plot_list = [
        ("olsf", olsf().fit, 1),
        ("olvf", b.benchmark('stream').fit, 1)]
    
    x = list(range(len(data)))
    plotlist=[]
    X,y = preprocessData(data) #data is being shuffled inside
    for triple in plot_list:
        if triple[2]==1: plotlist.append((triple[1](X,y), triple[0]))
    for i in range(len(plotlist)):
        plt.plot(x, plotlist[i][0], label=plotlist[i][1])  
    plt.legend()
    plt.xlabel("Instance")
    plt.ylabel("Test Error Rate %")
    plt.plot(range(10), '--bo')
    plt.title(dataset_name)
    plt.savefig('./figures/'+'olsf_stream'+str("_")+time.strftime("%H%M%S")+'.png')
    plt.show()
Example #20
def main(argv):
    git_repo = GIT_DIR
    git_origin = GITHUB_URL
    src_dir = SRC_DIR
    qmake_file = QMAKE_FILE
    testimages = TESTIMAGES
    testsizes = TESTSIZES
    binary = BINARY
    branches = BRANCHES
    images = []
    for img_prefix in testimages:
        imgs = map(lambda l: img_prefix+"_"+l+".png", testsizes)
        images += imgs

    try:
        opts, remainder = getopt.getopt(argv, "d:", ["dir="])
    except getopt.GetoptError:
        print "Error in passed parameters"
    else:
        if opts:
            opt, git_dir = opts[0]
            src_dir = git_dir + "/src/"
            images = map(lambda l: git_dir+"/"+IMAGE_DIR+"/"+l, images)
            r = benchmark.benchmark_local(src_dir=src_dir,
                                          images=images,
                                          qmake_file=qmake_file, binary=binary)
        else:
            print 'In case you want to specify an existing source directory, use "main.py -d <git_dir>" or "main.py --dir=<git_dir>"'
            images = map(lambda l: git_repo+"/"+IMAGE_DIR+"/"+l, images)
            r = benchmark.benchmark(git_repo=git_repo,
                                        src_dir=src_dir,
                                        branches=branches,
                                        git_origin=git_origin,
                                        images=images,
                                        qmake_file=qmake_file, binary=binary,
                                        clone_url=CLONE_URL)
    print r

    from datetime import datetime
    f = open("run_"+benchmark.computer_name()+"_"+datetime.now().strftime('%y_%m_%d__%H_%M')+'.pickle', 'wb')
    pickle.dump(r, f)
Example #21
from scarab import generate_pair
from benchmark import benchmark


benchmark(generate_pair, 100, verbose=True)
Example #22
File: roc_plot.py Project: rlim19/VU-FoB
        if hit['benchmark'] == 0:
            fp = fp + 1
        tp_list.append(tp)
        fp_list.append(fp)
    return (tp_list, fp_list)
        
def roc_plot(benchmark, title="ROC plot", filename=""):
    (y, x) = tp_fp_list(benchmark)
    plot.figure()
    plot.title(title)
    plot.xlabel("Number of False Positives")
    plot.ylabel("Number of True Positives")
    plot.axis([0, x[len(x)-1]+1, 0, y[len(y)-1]+1])
    plot.plot(x, y, 'k')
    if (filename != ""):
        plot.savefig(filename)
    #plot.show()

if __name__ == '__main__':
    f = open('proteins.txt', 'r')
    for line in f:
        protein_id = line.strip("\n")
        b = benchmark.benchmark(protein_id,
                      blast_service='plain',
                      method=benchmark.Pfam(),
                      n_alignments=1000,
                      max_evalue=0.01)
        roc_plot(b,
                title="ROC plot Pfam BLAST " + protein_id,
                filename="roc_plot_pfam_plain_" + protein_id)
Example #23
# -*- coding: utf-8 -*-

from WienerWindFilter import WienerWindFilter
from benchmark import benchmark

wwf = WienerWindFilter(0, 0, 1024)

f = lambda x, x_fs: wwf.apply(x, 128)
b = benchmark(f, 0, True)

print(b[0])
Example #24
def benchmark_fast_gelu(batch, seq_len, intermediate_dimension, data_type,
                        onnx_file, args):
    inputs, outputs = create_inputs_outputs(batch, seq_len,
                                            intermediate_dimension, data_type)
    time = benchmark(onnx_file, inputs, outputs, args)
    return time
Example #25
kwargs_algorithm = {
    "bee_colony": None, "de": None, "sea": None, "sade": None, "cmaes": None, 
    "pso": None, "pso_gen": None, "mbh": {"algo": pg.nlopt("lbfgs")},
    "naive": None
}

dict_labels = {
    "bee_colony": "Bee Colony", "de": "DE", "sea": "SEA", "sade": "SADE", 
    "cmaes": "CMAES", "pso": "PSO", "pso_gen": "gen. PSO", "mbh": "MBH",
    "naive": "Naive"
}

bmk_project = bmk.benchmark(
    list_problem_names,
    list_algorithm_names,
    kwargs_problem,
    kwargs_algorithm
)

bmk_project.iterations = 20
bmk_project.gen = 500
bmk_project.run_experiment()

# %% Results
bmk_project.accuracy.to_csv("../Data/Accuracy_Final.csv")
bmk_project.logs.to_csv("../Data/Logs_Final.csv")
bmk_project.get_descriptive()
bmk_project.descriptive.to_csv("../Data/Descriptives_Final.csv")
bmk_project.competition.to_csv("../Data/Competitiveness_Final.csv")

fig_perf_profile = bmk_project.performance_profile(range_tau = 25)
Example #26
def main(template_basename='basic',
         arguments="count=10, text='default text', type=int, object=(1, 2, 3), empty=None",
         template_parameters={},
         translator=None):
    print 'Benchmarking unit test template: %s' % template_basename
    
    # Properties of the test template
    template_filename = '%s.html' % template_basename
    template_filepath = os.path.join(TEST_DATA_DIR, template_filename)
    
    # Load the template from the tests
    with open(template_filepath, 'rt') as template_file:
        template_xml = template_file.read()
    assert template_xml.decode('utf8')
    
    # Parameters to pass to the compiled template
    render_parameters = eval('dict(%s)' % arguments)
    render_parameters.update(template_parameters)

    # Empty renderer used as a no-op baseline to subtract
    def empty_renderer(**kws):
        return u''
    def no_operation():
        local_var = empty_renderer(**render_parameters)
    
    # Compile the template to a module without writing it to a file (in memory)
    compiler = python_xml_template_compiler.PythonXMLTemplateCompiler()
    compiler.load(template_xml, template_filename=template_filename) 
    if translator is not None:
        compiler.configure_i18n(translator)
    module_source = compiler.compile(arguments)
    module_source = module_source.rstrip() + '\n'
    module = types.ModuleType(template_basename)
    exec module_source in module.__dict__
    def render_compiled():
        compiled_output = module.render(**render_parameters)
        
    # Load the template into Genshi
    genshi_template = genshi.template.MarkupTemplate(
        template_xml,
        filepath=template_filepath,
        filename=template_filename)
    def render_genshi():
        token_stream = genshi_template.generate(**render_parameters)
        genshi_output = token_stream.render(method='xml', encoding=None)
    
    # Time Genshi template rendering
    genshi_time = min(
        benchmark.benchmark(
            render_genshi, 
            0.1, 
            no_operation=no_operation)
        for n in xrange(10))
    print 'Genshi: %.3f ms' % (genshi_time * 1000)

    # Time compiled template rendering
    compiled_time = min(
        benchmark.benchmark(
            render_compiled, 
            0.1, 
            no_operation=no_operation) 
        for n in xrange(10))
    print 'Compiled: %.3f ms' % (compiled_time * 1000)

    # Time the Cython compiled version if Cython is available
    if cython:
        
        # Write out the compiled template as a pyx file
        pyx_filepath = os.path.join(CWD, '%s.pyx' % template_basename)
        with open(pyx_filepath, 'wt') as module_file:
            module_file.write(module_source)
        
        # Import it via Cython
        import pyximport
        pyximport.install()
        directives = __import__(template_basename, globals(), locals())
        def render_cython_compiled():
            cython_compiled_result = directives.render(**render_parameters)
        
        # Time it
        cython_compiled_time = min(
            benchmark.benchmark(
                render_cython_compiled, 
                0.1, 
                no_operation=no_operation) 
            for n in xrange(10))
        print 'Cython compiled: %.3f ms' % (cython_compiled_time * 1000)
        
    print
Example #27
sys.path.append("benchmark/")

from benchmark import benchmark
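# NOTE: this excerpt presumably also relies on gym, numpy (np), os and a
# neuralNetwork class imported earlier in the original file.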

# Parameters and toggles
ENV_NAME = "CartPole-v0"
NOS_EPISODES = 250
SAVE_PATH = "./models/"
if not os.path.exists(SAVE_PATH):
    os.mkdir(SAVE_PATH)
RENDER_GAME = False

# Setting up Gym
env = gym.make(ENV_NAME)
logger = benchmark()

# Finding the Network Shape
action_space = env.action_space.n
observation_space = env.observation_space.shape

# Initializing neural network
nn_solver = neuralNetwork(action_space, observation_space)

# Running Q Learning
for episode in range(NOS_EPISODES):
    state = env.reset()
    state = np.reshape(state, (1, observation_space[0]))
    time_step = 0
    terminate = False
    while True:
Example #28
def timsort(arr):
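    # NOTE: RUN (the fixed minimum run length), insertion_sort and merge are
    # presumably defined elsewhere in the original module.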
    n = len(arr)
    if n <= 64:  # On small lists it's best to use insertion_sort to minimize function calls
        return insertion_sort(arr)
    else:
        for i in range(0, n, RUN):
            insertion_sort(arr, i, min((i + RUN - 1), (n - 1)))
        # At this point we have one unsorted list
        # containing multiple sorted runs/ blocks

        # Now we take two runs/blocks at a time and merge them.
        # After each pass we double the merge size, so the previously
        # merged subarray is merged with the next one.
        block = RUN
        while block < n:
            for start in range(0, n, 2 * block):
                end = min((start + 2 * block - 1), (n - 1))
                mid = (start + end) // 2
                merge(arr, start, mid, end)

            block *= 2


if __name__ == "__main__":
    test_arr = [3, 1, 7, 4, 12, 9, 11, 102]

    timsort(test_arr)
    print(test_arr)
    import benchmark
    benchmark.benchmark(timsort)
Example #29
import sys
import os.path
# Import from sibling directory
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")

from scarab import generate_pair
from benchmark import benchmark
from common.utils import binary


pk, sk = generate_pair()
index = binary(42, size=8)


def func():
    pk.encrypt(index, sk)


benchmark(func, 100, verbose=True)
Example #30
from benchmark import benchmark

margin = 0
delay  = 1
#benchmark("./output/gawk4.benchmark", ("/usr/local/bin/gawk-4.0.0", "-f", "progs/read_file.awk", "input/mydata.csv"), margin, delay)
#benchmark("./output/ruby187.benchmark", ("/usr/bin/ruby1.8", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
#benchmark("./output/ruby193.benchmark", ("/usr/bin/ruby1.9.1", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
#benchmark("./output/perl5124.benchmark", ("/usr/bin/perl", "progs/read_file.pl", "input/mydata.csv"), margin, delay)
benchmark("./output/perl5124_stevan.benchmark", ("/usr/bin/perl", "progs/read_file_stevan.pl", "input/mydata.csv"), margin, delay)
#benchmark("./output/perl5124_stevan_map.benchmark", ("/usr/bin/perl", "progs/read_file_stevan_map.pl", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272.benchmark", ("/usr/bin/python", "progs/read_file.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272.benchmark", ("/usr/bin/python", "progs/read_file.py", "input/mydata.csv"), margin, delay)
benchmark("./output/pypy_tuple.benchmark", ("/opt/pypy/bin/pypy", "progs/read_file_tuple.py", "input/mydata.csv"), margin, delay)
benchmark("./output/pypy_tuple_fun.benchmark", ("/opt/pypy/bin/pypy", "progs/read_file_tuple_fun.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple_fun.benchmark", ("/usr/bin/python", "progs/read_file_tuple_fun.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple.benchmark", ("/usr/bin/python", "progs/read_file_tuple.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272_tuple_unicode.benchmark", ("/usr/bin/python", "progs/read_file_tuple_unicode.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272_tuple_tuple.benchmark", ("/usr/bin/python", "progs/read_file_tuple_tuple.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/python322.benchmark", ("/usr/bin/python3", "progs/read_file3.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272_csv.benchmark", ("/usr/bin/python", "progs/read_file_csv_tomasz.py", "input/mydata.csv"), margin,delay)
#benchmark("./output/java7b147.benchmark", ("/usr/bin/java", "-Xmx3000m", "progs/ReadFileArrayList", "input/mydata.csv"), margin, delay)
#benchmark("./output/c.benchmark", ("progs/read_file", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272_numpy.benchmark", ("/usr/bin/python", "progs/read_file_numpy.py", "input/mydata.csv"), margin, delay)
Example #31
"""
Stalinsort
The fastest sorting algorithm known to mankind.
Linear runtime of O(n).
Iterates once over the unsorted list and deletes all unsorted elements.
Done
"""


def stalin_sort(arr):
    if len(arr) <= 1:
        return arr
    max = arr[0]

    def new_max(val):
        nonlocal max
        max = val
        return max

    return [new_max(x) for x in arr if x >= max]


if __name__ == "__main__":
    test_arr = [3, 1, 7, 4, 12, 9, 11, 102]

    print(stalin_sort(test_arr))
    import benchmark
    benchmark.benchmark(stalin_sort)
Example #32
def test_benchmark():
    benchmark('hvm_basic_categorization', parallel=True)
    benchmark('hvm_subordinate_tasks', parallel=True)
    benchmark('hvm_all_categorization_tasks', parallel=True)
    benchmark('hvm_figure_ground', parallel=True)
Example #33
def run(trainfile, testfile, model_file, train_svms, test_svms):
    train(trainfile, model_file, train_svms)
    result_file = test(testfile, model_file, test_svms)
    benchmark(result_file)
Example #34
import sys
import os.path
# Import from sibling directory
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")

from scarab import generate_pair
from benchmark import benchmark
from common.utils import binary


pk, sk = generate_pair()
index = binary(42, size=8)
encrypted_one = pk.encrypt(1)
encrypted_zro = pk.encrypt(0)


def func():
    _ = encrypted_one & encrypted_zro


benchmark(func, 100, verbose=True, skip=10)
Example #35
"""
Selectionsort
We search for the minimum element and place it at the beginning of the unsorted part of the list.
Worst case runtime O(n^2)
"""


def selection_sort(arr):
    for i in range(len(arr)):
        min_idx = i
        for j in range(min_idx + 1, len(arr)):  # remaining unsorted list
            if arr[min_idx] > arr[j]:
                min_idx = j
        arr[i], arr[min_idx] = arr[min_idx], arr[
            i]  # swap found minimal with the first element
    return arr


if __name__ == "__main__":
    test_arr = [3, 1, 7, 4, 12, 9, 11, 102]

    print(selection_sort(test_arr))
    import benchmark
    benchmark.benchmark(selection_sort)
Example #36
from benchmark import benchmark

margin = 0
delay  = 1
benchmark("./output/gawk4.benchmark", ("/usr/local/bin/gawk-4.0.0", "-f", "progs/read_file.awk", "input/mydata.csv"), margin, delay)
benchmark("./output/ruby187.benchmark", ("/usr/bin/ruby1.8", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
benchmark("./output/ruby193.benchmark", ("/usr/bin/ruby1.9.1", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
benchmark("./output/perl5124.benchmark", ("/usr/bin/perl", "progs/read_file.pl", "input/mydata.csv"), margin, delay)
benchmark("./output/perl5124_stevan.benchmark", ("/usr/bin/perl", "progs/read_file_stevan.pl", "input/mydata.csv"), margin, delay)
benchmark("./output/python272.benchmark", ("/usr/bin/python", "progs/read_file.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272.benchmark", ("/usr/bin/python", "progs/read_file.py", "input/mydata.csv"), margin, delay)
benchmark("./output/pypy_tuple.benchmark", ("/opt/pypy/bin/pypy", "progs/read_file_tuple.py", "input/mydata.csv"), margin, delay)
benchmark("./output/pypy_tuple_fun.benchmark", ("/opt/pypy/bin/pypy", "progs/read_file_tuple_fun.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple_fun.benchmark", ("/usr/bin/python", "progs/read_file_tuple_fun.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple.benchmark", ("/usr/bin/python", "progs/read_file_tuple.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple_unicode.benchmark", ("/usr/bin/python", "progs/read_file_tuple_unicode.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple_tuple.benchmark", ("/usr/bin/python", "progs/read_file_tuple_tuple.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python322.benchmark", ("/usr/bin/python3", "progs/read_file3.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_csv.benchmark", ("/usr/bin/python", "progs/read_file_csv_tomasz.py", "input/mydata.csv"), margin,delay)
benchmark("./output/java7b147.benchmark", ("/usr/bin/java", "-Xmx3000m", "progs/ReadFileArrayList", "input/mydata.csv"), margin, delay)
benchmark("./output/c.benchmark", ("progs/read_file", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_numpy.benchmark", ("/usr/bin/python", "progs/read_file_numpy.py", "input/mydata.csv"), margin, delay)
benchmark("./output/c_2.benchmark", ("progs/read_file2", "input/mydata.csv"), margin, delay)
benchmark("./output/lua.benchmark", ("/usr/bin/lua", "progs/read_file.lua", "input/mydata.csv"), margin, delay)
from benchmark import benchmark

margin = 0
delay  = 1
benchmark("./output/ruby187.benchmark", ("/usr/bin/ruby1.8", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
benchmark("./output/ruby193.benchmark", ("/usr/bin/ruby1.9.1", "progs/read_file.rb", "input/mydata.csv"), margin, delay)
benchmark("./output/perl5124.benchmark", ("/usr/bin/perl", "progs/read_file.pl", "input/mydata.csv"), margin, delay)
benchmark("./output/perl5124_stevan.benchmark", ("/usr/bin/perl", "progs/read_file_stevan.pl", "input/mydata.csv"), margin, delay)
benchmark("./output/python272.benchmark", ("/usr/bin/python", "progs/read_file.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_tuple.benchmark", ("/usr/bin/python", "progs/read_file_tuple.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python322.benchmark", ("/usr/bin/python3", "progs/read_file3.py", "input/mydata.csv"), margin, delay)
benchmark("./output/python272_csv.benchmark", ("/usr/bin/python", "progs/read_file_csv_tomasz.py", "input/mydata.csv"), margin,delay)
benchmark("./output/java7b147.benchmark", ("/usr/bin/java", "-Xmx3000m", "progs/ReadFileArrayList", "input/mydata.csv"), margin, delay)

#benchmark("./output/mawk.benchmark", ("/usr/bin/mawk", "-f", "progs/read_file.awk", "input/mydata.csv"), margin, delay)
#benchmark("./output/python272_numpy.benchmark", ("/usr/bin/python", "progs/read_file_numpy.py", "input/mydata.csv"), margin, delay)
#benchmark("./output/java7b147_vector.benchmark", ("/usr/bin/java", "-Xmx3000m", "progs/ReadFile", "input/mydata.csv"), margin, delay)
Example #38
							'wordcount': corpus.source_wordcount,
							'linecount': corpus.lines,
							'unique': corpus.unique})
	# pdb.set_trace()
	simplereport(log,inputs)

if __name__ == '__main__':
	f1 = '/Users/margoK/Dropbox/autocomplete/sampletexts/whitmanpoem.txt'
	f2 = '/Users/margoK/Dropbox/autocomplete/sampletexts/allshakespeare.txt'

	corpus = []
	
	def trie_build_test(f):
		corpus.append(trie_build(f))

	# benchmark(inputs = (f2,'foo'),fns=trie_build_test,reportfn=simplereport)
	benchmark(inputs=(f2,), fns=trie_build_test, reportfn=simplereport)
	# corpus = corpus[0]

	def test_trie_search(_,word):
		trie_search(corpus,word)

	test_funcs = [linear_search, test_trie_search]#lambda _, word: trie_search(corpus, word)]
	benchmark(inputs=(f2, 'dead'),fns=test_funcs,reportfn=corpusreport,trials=1,corpus=corpus[0])


	 # test_funcs = (linear_search,try_trie)
	 # benchmark((f1,'dead'),test_funcs)
	# f2 = '/Users/margoK/Dropbox/autocomplete/shakespeare.txt'
	# benchmark((f2,'dream'),test_funcs)
Example #39
File: main.py Project: neovdr/VU-FoB
            elif m[0] == "scop":
                ms.append(benchmark.SCOP())
            else:
                assert False, "We made an unkown method!"
        method = benchmark.CombineTakeOnes(ms)
            

    f = open(filename, 'r')
    for line in f:
        protein_id = line.strip("\n")
        plot_title = ("BLAST(e=" + str(evalue) + ") "
                      + protein_id +
                      " (" + method.fname() + ")")
        plot_filename = "roc_plot_" + method.name() + "_" + protein_id
        b = benchmark.benchmark(protein_id,
            golden_standard=method,
            search_method=benchmark.Blast(evalue, max_alignments))
        b_random = benchmark.benchmark(protein_id,
             golden_standard=method,
             search_method=benchmark.RandomUniprot())
        roc_plot.roc_plot(b, title=plot_title, filename=plot_filename,
             random=b_random)
    

def usage():
    print """This is a script that draws roc plots of using BLAST for homology
search compared to a golden standard (GeneOntology, Pfam or SCOP). One filename
is expected as an argument that contains a list of uniprot protein ids to use.

Command line options:
Example #40
File: test.py Project: nathdwek/codegen
from symbol import Symbol
from source import Source
from algos import weaver, fanno, huffman, block
from benchmark import benchmark

a = Symbol('A', 2**-5)
b = Symbol('b', 2**-5)
c = Symbol('c', 2**-5)
d = Symbol('d', 2**-5)
e = Symbol('e', 2**-4)
f = Symbol('f', 2**-4)

g = Symbol('g', 2**-2)
h = Symbol('h', 2**-1)
i = Symbol('i', 0.05)

j = Symbol('j', 0.5)
k = Symbol('k', 0.25)
l = Symbol('l', 0.125)
m = Symbol('m', 0.125)

s = Source(d, b, f, a, e, c, g, h)
c = block(s)


for sym in s.sorted():
    print(sym.name(), ' ', c.codeOf(sym))

benchmark(s, c)
Example #41
    return np.real(roots[0])
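# NOTE: this excerpt starts mid-file; gg, keff, brute.findBest, greedy.findBest
# and the timing benchmark() helper are presumably defined or imported above.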


dataB = []
dataG = []

J = 1.0

for d in range(2, 5):
    si = np.array(np.meshgrid(*[[0, 1] for _ in range(d)], indexing='ij'))
    l = np.tensordot(2**np.arange(d), si, axes=((0, ), (0, )))
    xi = l * (2 / 2**d) - 1

    x = gg(xi, xi)

    dt, b = benchmark(brute.findBest, x, 1e-8)
    dataB.append([
        d,
        keff(2, 2 * d, sum([v.size for v in b[2]])),
        sum([v.size for v in b[2]]) * 1. / x.size, dt
    ])

    dt, g = benchmark(greedy.findBest, x, 1e-8)
    dataG.append([
        d,
        keff(2, 2 * d, sum([v.size for v in g])),
        sum([v.size for v in g]) * 1. / x.size, dt
    ])

    print(d, dataB[-1][1], dataG[-1][1])
Example #42
def synthesis(name):

    result_epoch = {}
    for syn in EPOCHS_SYNTHS:
        print(syn)
        synthesizer = syn(epochs=1)

        print('----------------------EVALUATING EPOCH SYNTHESIZER: ', syn)

        data, categorical_columns, ordinal_columns = load_dataset(name)
        synthesizer.fit(data, categorical_columns, ordinal_columns)

        print(
            '----------------------SYNTHESIZED DATA----------------------------------------'
        )

        synthesized_data = pd.DataFrame(synthesizer.sample(100))
        print(synthesized_data)

        #writing synthesized data to CSV on S3 bucket
        print('-------Writing to S3 Bucket------------')

        write_to_s3(synthesized_data, 'synthesized_{}'.format(name))

        print(
            '------------------------BENCHMARK RESULTS-------------------------------------'
        )
        benchmarked_data = pd.DataFrame(
            benchmark(synthesizer.fit_sample, datasets=[name], repeat=1))
        print(benchmarked_data)

        max_bench_score = pd.DataFrame(max(benchmarked_data.iloc[:, 1]))
        print(max_bench_score)

        result_epoch.update({syn: max_bench_score.iloc[:, 1]})

    print(
        '----------------------------DONE RUNNING ALL EPOCH SYNTHESIZERS-----------------------------'
    )

    result_noinit = {}
    for syn in NO_INIT:
        synthesizer = syn()
        print('----------------------EVALUATING NO_INIT SYNTHESIZER: ', syn)

        data, categorical_columns, ordinal_columns = load_dataset(name)
        synthesizer.fit(data, categorical_columns, ordinal_columns)
        print(
            '----------------------SYNTHESIZED DATA----------------------------------------'
        )

        synthesized_data = pd.DataFrame(synthesizer.sample(50))
        print(synthesized_data)

        #writing synthesized data to CSV on S3 bucket
        print(
            '-----------------------Writing to S3 Bucket-----------------------------------'
        )

        write_to_s3(synthesized_data, 'synthesized_{}'.format(name))

        print(
            '------------------------BENCHMARK RESULTS-------------------------------------'
        )
        benchmarked_data = pd.DataFrame(
            benchmark(synthesizer.fit_sample, datasets=[name], repeat=1))
        print(benchmarked_data)

        #print(max(benchmarked_data.iloc[:,1]))
        max_bench_score = max(benchmarked_data.iloc[:, 1])
        print(max_bench_score)

        result_noinit.update({syn: max_bench_score})
        print(result_noinit)

    print(
        '----------------------------DONE RUNNING ALL NO_INIT SYNTHESIZERS-----------------------------'
    )
    final_res = {**result_epoch, **result_noinit}
    final_res = pd.DataFrame.from_dict(final_res, orient='index')

    print('FINAL RESULT : ', final_res)

    print(
        '-----------------------Writing to S3 Bucket-----------------------------------'
    )

    write_to_s3(final_res, 'benchmarked_{}'.format(name))
Example #43

##############################################################################

if __name__ == "__main__":
    # read in the sample text
    with open("sample.txt", "r") as f:
        text = f.read().strip()
    # list of the functions to test
    functions = [
        caesar, caesar_list, caesar_chr_code_list,
        caesar_chr_code_inplace_list, caesar_translate_embedded_class,
        caesar_translate_independent_class, caesar_translate_precalculated_dict
    ]
    # test the functions
    correct = caesar(text[:100], 15)
    for f in functions:
        if f(text[:100], 15) != correct:
            print("Function {} FAILED".format(f.__name__))
            exit(1)
        else:
            print("Function {} passed".format(f.__name__))
    # benchmark the functions
    for length in (1, 10, 100, 1000, 10000, 100000):
        # get text of the right length and print header
        test_text = text[:length]
        print("\n{:,} long string".format(length))
        for f in functions:
            # benchmark each function
            benchmark(f, n=1000, args=[test_text, 5])
Example #44
 def add(self):
     """wraps given add function"""
     self.users, time = benchmark(lambda: self._add(self.users, self._parser))
     self.varAddCost.set("ADDING cost: {0:.4}ms".format(1000 * time))  # up to 4 decimals
     self.update_info()
Example #45
 def search(self):
     """wraps given search function"""
     if self.validate():  # if no exceptions are thrown, i.e. valid input
         self.result, time = benchmark(lambda: self._search(self.users, self.minimum, self.maximum))
         self.varSearchCost.set("SEARCHING cost: {0:.4}ms".format(1000 * time))
         self.showNext()  # show the first result
Example #46
 def remove(self):
     """wraps given remove function"""
     if self.validate():
         self.users, time = benchmark(lambda: self._remove(self.users, self.minimum, self.maximum))
         self.varRemoveCost.set("REMOVING cost: {0:.4}ms".format(1000 * time))
         self.update_info()
Example #47
if __name__ == '__main__':
    server = args.s
    connection = args.c
    record_directory = args.d
    test_time = args.t
    benchmark_test = benchmark_dict[args.b]
    cluster_mode_enable = args.C
    script_path = args.l
    tab_name = args.n
    other_args_dict = args.my_dict
    reset_slowlog = args.r

    create_dir(record_directory)
    bh = benchmark.benchmark(record_directory,
                             test_time,
                             benchmark_test,
                             server,
                             other_args_dict=other_args_dict)
    if cluster_mode_enable:
        bh.init_cluster_client()
    else:
        bh.init_client()
    if script_path:
        bh.load_lua_script(script_path)
    if reset_slowlog:
        bh.reset_slow()
    start_time = bh.get_time()
    commandstats_df_begin = bh.get_commandstats()
    threads = list()
    for index in range(connection):
        x = threading.Thread(target=bh.run)
Example #48
import argparse, benchmark, copy, random, sort, time

parser = argparse.ArgumentParser()
parser.add_argument("-b", "--benchmark", action='store_true', help="Run benchmarking")
args = parser.parse_args()

if args.benchmark:
	benchmark.benchmark()
else:
	values = range(1000)
	random.shuffle(values)

	def writeSortProcess(data, algorithm):
		print algorithm.__name__ + " sort...\n"
		print "Starting:\n" + str(data)
		dataCopy = copy.copy(data)
		start = time.clock()
		sorted = algorithm(dataCopy)
		end = time.clock()
		print "\nEnding:\n" + str(sorted)
		print "TIME: " + str((end - start)) + "s\n"
		print "----------------------------------------\n"

	writeSortProcess(values, sort.insertion)
	writeSortProcess(values, sort.selection)