Example #1
def main(clock_type):
    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.DEBUG)

    if _SCRPDIR == "":
        logging.error("Could not find script directory")
        return True
    if clock_type.upper() not in _PARSE_CLOCK_MAP:
        logging.error("Supplied clock type \"%s\" has no parsing method",
                     clock_type)
        return True

    logging.info("Compiling and adb pushing timing exec")
    compile_and_push(clock_type.upper())

    logging.info("Running with clock_type: %s", clock_type)
    for cont_level in _CONTENTION_LEVELS:
        logging.info("Running with contention_level: %s", cont_level)

        try:
            benchmark.Benchmark(clock_type, cont_level)
            parse.Parse(_PARSE_CLOCK_MAP[clock_type.upper()],
                        join(_SCRPDIR, "logs", clock_type, cont_level))
        except BenchmarkException as err:
            logging.error("BenchmarkException caught: %s", err)
            return False
        except ParseException as err:
            logging.error("ParseException caught: %s", err)
            return False

    return True
Example #2
 def testCreateTestsSingle(self):
     b = benchmark.Benchmark([mocks.MockNameServer(mocks.GOOD_IP)],
                             test_count=1)
     results = b.CreateTests(('A mail.google.com', ))
     self.assertEquals(results, [['A', 'mail.google.com']])
     # Oops, this isn't a real tuple.
     self.assertRaises(AssertionError, b.CreateTests, 'google.com')
Example #3
def _main(args: Args) -> int:
    if args.no_warnings:
        warnings.filterwarnings("ignore")
    test = benchmark.Benchmark(bucket=args.bucket)
    test.run()

    return 0
Example #4
 def testCreateTestsChunkRecords(self):
     b = benchmark.Benchmark([mocks.MockNameServer(mocks.GOOD_IP)],
                             test_count=100)
     results = b.CreateTests(('A mail.google.com', 'CNAME test.live.com'),
                             select_mode='chunk')
     self.assertEquals(
         results, [['A', 'mail.google.com'], ['CNAME', 'test.live.com']])
Example #5
 def testCreateTestsWeighted(self):
     b = benchmark.Benchmark([mocks.MockNameServer(mocks.GOOD_IP)],
                             test_count=1000)
     results = b.CreateTests(('google.com', 'live.com'))
     self.assertEquals(len(results), 1000)
     self.assertTrue(('A', 'www.google.com.') in results)
     caches = [x for x in results if 'cache' in x[1]]
     self.assertTrue(len(caches) > 0 and len(caches) < 50)
Example #6
def main():
    image_path = input('>> Path to the image, including the extension: ')
    image = img_process.ImageProcess(image_path)
    huffman_obj = huffman.Huffman(image.img_array)
    save_data.SaveData('results/', image.img_array, huffman_obj.hist,
                       huffman_obj.codes)
    bench = benchmark.Benchmark(image, image.img_array, huffman_obj.codes)
    print('\nRESULTS CAN BE VIEWED IN "/results"')
Example #7
    def testDigestion(self):
        ns_list = (mocks.MockNameServer(mocks.GOOD_IP),
                   mocks.MockNameServer(mocks.PERFECT_IP),
                   mocks.MockNameServer(mocks.BROKEN_IP),
                   mocks.MockNameServer(mocks.SLOW_IP))
        b = benchmark.Benchmark(ns_list)
        good = ns_list[0].FakeAnswer(None)
        bad = ns_list[0].FakeAnswer(None, no_answer=True)

        b.results = {
            ns_list[0]: [[('www.google.com.', 'A', 2.90, bad),
                          ('google.com.', 'A', 9.80, good),
                          ('www.google.com.', 'A', 9.90, good)],
                         [('www.google.com.', 'A', 9.90, bad),
                          ('google.com.', 'A', 9.90, good),
                          ('www.google.com.', 'A', 9.80, good)]],
            ns_list[1]: [[('www.google.com.', 'A', 3.40, good),
                          ('google.com.', 'A', 3.40, good),
                          ('www.google.com.', 'A', 3.60, good)],
                         [('www.google.com.', 'A', 3.30, good),
                          ('google.com.', 'A', 3.30, good),
                          ('www.google.com.', 'A', 3.40, good)]],
            ns_list[2]: [[('www.google.com.', 'A', 60, None),
                          ('google.com.', 'A', 60, None),
                          ('www.google.com.', 'A', 60, None)],
                         [('www.google.com.', 'A', 60, None),
                          ('google.com.', 'A', 60, None),
                          ('www.google.com.', 'A', 60, None)]],
            ns_list[3]: [[('www.google.com.', 'A', 26.25, good),
                          ('google.com.', 'A', 26.30, good),
                          ('www.google.com.', 'A', 26.10, good)],
                         [('www.google.com.', 'A', 26.40, good),
                          ('google.com.', 'A', 12.40, bad),
                          ('www.google.com.', 'A', 26.80, good)]]
        }

        expected = []
        averages = dict([(x[0].ip, x[1]) for x in b.ComputeAverages()])
        self.assertEquals(averages[mocks.GOOD_IP], 8.7000000000000011)
        self.assertEquals(averages[mocks.PERFECT_IP], 3.4000000000000004)
        self.assertEquals(averages[mocks.BROKEN_IP], 60)
        self.assertEquals(averages[mocks.SLOW_IP], 24.041666666666664)

        expected = [('127.127.127.127', 3.2999999999999998),
                    ('127.0.0.1', 9.80), ('9.9.9.9', 26.10),
                    ('192.168.0.1', 60)]
        fastest = [(x[0].ip, x[1]) for x in b.FastestNameServerResult()]
        self.assertEquals(fastest, expected)

        expected = [
            (None, '####', 3.2999999999999998),
            (None, '##########', 9.8000000000000007),
            (None, '###########################', 26.100000000000001),
            (None,
             '############################################################',
             60)
        ]
        self.assertEquals(b._LowestLatencyAsciiChart(), expected)
Example #8
    def testFastest(self):
        ns_list = (mocks.MockNameServer('X', name='X'),
                   mocks.MockNameServer('O', name='O'),
                   mocks.MockNameServer('U', name='U'))
        b = benchmark.Benchmark(ns_list)
        good = ns_list[0].FakeAnswer(None)
        b.results = {
            ns_list[0]:
            [[('www.microsoft.com.', 'A', 3.0879974365234375, good),
              ('www.youku.com.', 'A', 2.2590160369873047, good),
              ('www.orkut.co.in.', 'A', 25.511980056762695, good),
              ('cache-9.ku6.com.', 'A', 1013.6392116546631, good),
              ('wsj.com.', 'A', 2.3639202117919922, good),
              ('www.imagevenue.com.', 'A', 2.6688575744628906, good),
              ('www.travian.ae.', 'A', 2.5160312652587891, good),
              ('www.fotolog.net.', 'A', 2.6750564575195312, good),
              ('www.torrentz.com.', 'A', 2.7811527252197266, good),
              ('www.wer-kennt-wen.de.', 'A', 2.7070045471191406, good)]],
            ns_list[1]:
            [[('www.microsoft.com.', 'A', 82.499980926513672, good),
              ('www.youku.com.', 'A', 81.991195678710938, good),
              ('www.orkut.co.in.', 'A', 82.377910614013672, good),
              ('cache-9.ku6.com.', 'A', 1141.1499977111816, good),
              ('wsj.com.', 'A', 84.334135055541992, good),
              ('www.imagevenue.com.', 'A', 84.282875061035156, good),
              ('www.travian.ae.', 'A', 84.036111831665039, good),
              ('www.fotolog.net.', 'A', 84.750175476074219, good),
              ('www.torrentz.com.', 'A', 84.517002105712891, good),
              ('www.wer-kennt-wen.de.', 'A', 83.980083465576172, good)]],
            ns_list[2]:
            [[('www.microsoft.com.', 'A', 12.507915496826172, good),
              ('www.youku.com.', 'A', 357.06806182861328, good),
              ('www.orkut.co.in.', 'A', 46.499967575073242, good),
              ('cache-9.ku6.com.', 'A', 697.60799407958984, good),
              ('wsj.com.', 'A', 87.159872055053711, good),
              ('www.imagevenue.com.', 'A', 11.99793815612793, good),
              ('www.travian.ae.', 'A', 11.492013931274414, good),
              ('www.fotolog.net.', 'A', 12.087106704711914, good),
              ('www.torrentz.com.', 'A', 12.598991394042969, good),
              ('www.wer-kennt-wen.de.', 'A', 11.770963668823242, good)]]
        }

        expected = [('X', 2.2590160369873047), ('U', 11.492013931274414),
                    ('O', 81.991195678710938)]
        fastest = [(x[0].ip, x[1]) for x in b.FastestNameServerResult()]
        self.assertEquals(fastest, expected)

        expected = [
            ('X', '##', 2.2590160369873047),
            ('U', '########', 11.492013931274414),
            ('O', '#####################################################',
             81.991195678710938)
        ]
        self.assertEquals(b._LowestLatencyAsciiChart(), expected)
Example #9
    def PrepareBenchmark(self):
        """Setup the benchmark object with the appropriate dataset."""
        if len(self.nameservers) == 1:
            thread_count = 1
        else:
            thread_count = self.options.benchmark_thread_count

        self.bmark = benchmark.Benchmark(self.nameservers,
                                         query_count=self.options.query_count,
                                         run_count=self.options.run_count,
                                         thread_count=thread_count,
                                         status_callback=self.UpdateStatus)
Example #10
def main():
    # GPU utilization
    CONFIG_PATH = "/project/datasets-API/benchmark-package/config/mnist.toml"
    NUM_RUNS = 1
    PATH_TO_CHROME_TRACES = "/project/datasets-API/benchmark-package/chrome-traces/test.json"

    benchmark = bm.Benchmark('data_API', 'LowResFrameClassifier', CONFIG_PATH)
    tot_acc, tot_time = 0, 0
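    # Accumulate training time over NUM_RUNS runs; the validation run below is
    # commented out, so tot_acc stays at 0.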
    for i in range(NUM_RUNS):
        tot_time += benchmark.run('training', 500, profile=False)[0]
        # tot_acc += benchmark.run('validation', 1000, profile=False)[1]
    print("RESULTS: Train time=", tot_time / NUM_RUNS,
          ", Validation accuracy=", tot_acc / NUM_RUNS * 100)
Example #11
 def testRun(self):
     ns_list = (mocks.MockNameServer(mocks.GOOD_IP),
                mocks.MockNameServer(mocks.PERFECT_IP),
                mocks.MockNameServer(mocks.BROKEN_IP),
                mocks.MockNameServer(mocks.SLOW_IP))
     b = benchmark.Benchmark(ns_list, test_count=3, run_count=2)
     self.assertRaises(AssertionError, b.Run)
     b.CreateTests(['A www.google.com'])
     self.assertEquals(b.test_data,
                       [['A', 'www.google.com'], ['A', 'www.google.com'],
                        ['A', 'www.google.com']])
     b.Run()
     ips_tested = sorted([x.ip for x in b.results])
     expected = ['127.0.0.1', '127.127.127.127', '192.168.0.1', '9.9.9.9']
     self.assertEquals(ips_tested, expected)
     self.assertEquals(len(b.results[ns_list[0]]), 2)
     self.assertEquals(len(b.results[ns_list[0]][0]), 3)
Example #12
def main():
    perfs = libraries.instantiate_libraries()
    perfs_ab_sorted = sorted(perfs, key=lambda l: l.name)
    bnch = benchmark.Benchmark(perfs_ab_sorted)
    results = []
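    # Run each configured JSON document through the benchmark and collect
    # (title, result) pairs for the chart data below.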
    for title, config in JSONS.items():
        with config["path"].open('rt', encoding='utf-8') as f:
            cur_jsn = f.read()

        cur_jsn = six.ensure_str(cur_jsn)  # this is needed for python 2
        result = bnch.run(cur_jsn, config.get("times"))
        results.append((title, result))

    chart_data = charts.from_results(
        results, urls=[config['url'] for config in JSONS.values()])
    with open('results.json', 'w') as f:
        json.dump(chart_data, f)
Example #13
 def testNormalRun(self):
     ns_list = (mocks.MockNameServer(mocks.GOOD_IP),
                mocks.MockNameServer(mocks.PERFECT_IP),
                mocks.MockNameServer(mocks.BROKEN_IP),
                mocks.MockNameServer(mocks.SLOW_IP))
     b = benchmark.Benchmark(ns_list, test_count=3, run_count=2)
     b.CreateTests(['google.com', 'live.com'])
     b.Run()
     expected = ['127.0.0.1', '127.127.127.127', '192.168.0.1', '9.9.9.9']
     averages = dict([(x[0].ip, x[1]) for x in b.ComputeAverages()])
     self.assertEquals(len(averages), 4)
     self.assertTrue(averages[mocks.GOOD_IP] >= 8)
     self.assertTrue(averages[mocks.PERFECT_IP] <= 5)
     self.assertTrue(averages[mocks.BROKEN_IP] >= 59)
     self.assertTrue(averages[mocks.SLOW_IP] >= 20)
     self.assertEquals(b.BestOverallNameServer(), ns_list[1])
     self.assertEquals(b.NearestNameServers(count=2),
                       [ns_list[1], ns_list[0]])
Example #14
#!/usr/bin/env python

from __future__ import absolute_import, division, print_function

# ----------------------------
# Imports for other modules --
# ----------------------------
from lsst.qserv.admin import commons
import benchmark

if __name__ == '__main__':

    testdata_dir = '/datapool/tmp/loader_test/test'
    out_dir = '/datapool/tmp/loader_tmp/test'
    config = commons.read_user_config()
    bench = benchmark.Benchmark(testdata_dir, out_dir)
    bench.run()
Example #15
 def testEmptyRun(self):
     ns_list = (mocks.MockNameServer(mocks.GOOD_IP), )
     b = benchmark.Benchmark(ns_list, test_count=3, run_count=2)
     self.assertRaises(AssertionError, b.Run)
Example #16
        os.path.join(cwd, '..', '..', '..', 'utils', 'benchmarks',
                     'tensorflow'))
    import benchmark

    logging.basicConfig(
        level=logging.getLevelName("DEBUG"),
        format='%(asctime)s %(name)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    layer_dict = {}
    utils = {"train": False}

    module = benchmark.Benchmark(
        partial(graph_builder, layer_dict),
        inputs,
        initializer,
        add_args,
        iteration_report,
        partial(initializer_sess, layer_dict, utils),
    )

    # Make the sparse FC layer and insert into the layer dictionary
    # for use by the builder functions:
    options = benchmark.parse_opts(module, False)
    np.random.seed(options.np_seed)
    layer_dict['fc_gen'] = lambda: make_random_sparse_fc_layer(options)

    if options.train:
        utils["train"] = True

    fc = layer_dict['fc_gen']()
    print(
Example #17
import benchmark

game_list = benchmark.Benchmark().get_game()

list(game_list)
Example #18
    firstDB = None
    for group in groups:
        if os.path.exists("chunkMap.txt"):
            os.system("rm chunkMap.txt")
        if len(group["DBs"]) == 0: continue
        firstDB = group["DBs"][0]["name"]
        firstTable = group["DBs"][0]["tables"][0]

        first = True
        for db in group["DBs"]:
            data_dir = db["dir"]
            dbName = db["name"]
            temp_dir = os.path.join(data_dir, "temp")
            config = commons.read_user_config()
            bench = benchmark.Benchmark(data_dir, temp_dir, dbName)
            bench.run()

            # change css here
            if not first:
                cmd = "mysql -D qservCssData --port=%s --socket %s --user=%s --password=%s " % (
                    config['mysqld']['port'], config['mysqld']['socket'],
                    config['mysqld']['user'], config['mysqld']['pass'])
                # Change all other tables
                oldKey = "/DBS/%s/.packed.json" % dbName
                newKey = "/DBS/%s/.packed.json" % firstDB
                dbcmd = '-e "UPDATE qservCssData.kvData css1, qservCssData.kvData css2 SET css1.kvVal=css2.kvVal WHERE css1.kvKey=\'%s\' AND css2.kvVal=\'%s\';"' % (
                    oldKey, newKey)
                print(cmd + dbcmd)
                os.system(cmd + dbcmd)
            first = False
Example #19
import benchmark
import numpy as np

# Instantiate the benchmark class
bench = benchmark.Benchmark()

# Get all the functions as a list
funciones = bench.getFunciones()

# The array size can be whatever you want
print(funciones[0](np.zeros(10)))
print(funciones[5](np.zeros(30)))

# Returns the number of functions
print(bench.getNumFunciones())

# Returns the lower bound
print(bench.getLimInf())

# Returns the upper bound
print(bench.getLimSup())

# Returns the value that functions 2 and 7 take at the optimum
print(bench.getFuncMinValor(2))
print(bench.getFuncMinValor(7))

# Returns a dictionary with the upper and lower bounds of the domain and the value that function 5 takes at the optimum.
print(bench.getInfo(5))

# Get function 3 and evaluate it at [0,...,0]
f3 = bench.getFuncion(3)
Example #20
#                 val = random()
#                 line[index] = val
#                 break
#     return line

if __name__ == "__main__":
    trainpath, testpath, outputname, noise, bags, c, stop_criterion = main(
        sys.argv[1:])
    if not os.path.exists("results/"):
        os.makedirs("results/")
    with open("results/{}.csv".format(outputname), "w") as f:
        f.write(
            "c,stop criterion,bags,noise,classifier,accuracy,precision,recall,f1 score,learnT,testT \n"
        )
    f.close()
    x_train, y_train, x_test, y_test = preprocessor(trainpath,
                                                    testpath,
                                                    noise,
                                                    labels=[0, 1])
    ben = benchmark.Benchmark(x_train, y_train, x_test, y_test)
    ben.run_benchmark(outputname, noise, bags, c, stop_criterion)

    # to override the noise in parameter of the script and run the benchmark on different noise percentages
    # noises = [0.1, 0.15, 0.2, 0.25, 0.5]
    # cs = range(4, 12, 1)
    # for k in cs:
    #     for n in noises:
    #         x_train, y_train, x_test, y_test = preprocessor2(trainpath, testpath, n, labels=[0, 1])
    #         ben = benchmark.Benchmark(x_train, y_train, x_test, y_test)
    #         ben.run_benchmark(outputname, n, bags, c, stop_criterion)
Example #21
    def benchmark(self, use_default=True, df_benchmark_stats_electricity=None,
                  df_benchmark_stats_fossil_fuel=None):
        """
        This function adds Benchmark instances for the current Building instance
        :return:
        """
        print("Start benchmarking")
        if use_default:
            df_sample_bench_stats_e = constants.Constants.df_sample_benchmark_stats_e
            df_sample_bench_stats_f = constants.Constants.df_sample_benchmark_stats_f
        else:
            df_sample_bench_stats_e = df_benchmark_stats_electricity
            df_sample_bench_stats_f = df_benchmark_stats_fossil_fuel

        if (hasattr(self, "im_electricity") and hasattr(self.im_electricity, "coeffs")):
            # Electricity
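            # For each fitted coefficient ('hsl', 'hcp', 'base', 'ccp', 'csl'), build a
            # Benchmark against the sample electricity stats and run it without plotting.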
            self.benchmark_HSL_e = benchmark.Benchmark('beta_hdd',
                                                       self.im_electricity.coeffs['hsl'],
                                                       df_bench_stats=df_sample_bench_stats_e,
                                                       valid=self.im_electricity.coeff_validation['hsl'])
            self.benchmark_HCP_e = benchmark.Benchmark('beta_beth',
                                                       self.im_electricity.coeffs['hcp'],
                                                       df_bench_stats=df_sample_bench_stats_e,
                                                       valid=self.im_electricity.coeff_validation['hcp'])
            self.benchmark_BASE_e = benchmark.Benchmark('beta_base',
                                                        self.im_electricity.coeffs['base'],
                                                        df_bench_stats=df_sample_bench_stats_e,
                                                       valid=self.im_electricity.coeff_validation['base'])
            self.benchmark_CCP_e = benchmark.Benchmark('beta_betc',
                                                       self.im_electricity.coeffs['ccp'],
                                                       df_bench_stats=df_sample_bench_stats_e,
                                                       valid=self.im_electricity.coeff_validation['ccp'])
            self.benchmark_CSL_e = benchmark.Benchmark('beta_cdd',
                                                       self.im_electricity.coeffs['csl'],
                                                       df_bench_stats=df_sample_bench_stats_e,
                                                       valid=self.im_electricity.coeff_validation['csl'])
            self.benchmark_HSL_e.benchmark(plot=False)
            self.benchmark_HCP_e.benchmark(plot=False)
            self.benchmark_BASE_e.benchmark(plot=False)
            self.benchmark_CCP_e.benchmark(plot=False)
            self.benchmark_CSL_e.benchmark(plot=False)
        else:
            self.benchmark_HSL_e = None
            self.benchmark_HCP_e = None
            self.benchmark_BASE_e = None
            self.benchmark_CCP_e = None
            self.benchmark_CSL_e = None
        if (hasattr(self, "im_fossil_fuel") and hasattr(self.im_fossil_fuel, "coeffs")):
            # Need to add default fossil fuel in the constants module !!!
            # Default benchmark stats will be used if no specific benchmark stats are provided
            # Fossil fuel
            self.benchmark_HSL_f = benchmark.Benchmark('beta_hdd',
                                                       self.im_fossil_fuel.coeffs['hsl'],
                                                       df_bench_stats=df_sample_bench_stats_f,
                                                       valid=self.im_fossil_fuel.coeff_validation['hsl'])
            self.benchmark_HCP_f = benchmark.Benchmark('beta_beth',
                                                       self.im_fossil_fuel.coeffs['hcp'],
                                                       df_bench_stats=df_sample_bench_stats_f,
                                                       valid=self.im_fossil_fuel.coeff_validation['hcp'])
            self.benchmark_BASE_f = benchmark.Benchmark('beta_base',
                                                        self.im_fossil_fuel.coeffs['base'],
                                                        df_bench_stats=df_sample_bench_stats_f,
                                                       valid=self.im_fossil_fuel.coeff_validation['base'])
            self.benchmark_CCP_f = benchmark.Benchmark('beta_betc',
                                                       self.im_fossil_fuel.coeffs['ccp'],
                                                       df_bench_stats=df_sample_bench_stats_f,
                                                       valid=self.im_fossil_fuel.coeff_validation['ccp'])
            self.benchmark_CSL_f = benchmark.Benchmark('beta_cdd',
                                                       self.im_fossil_fuel.coeffs['csl'],
                                                       df_bench_stats=df_sample_bench_stats_f,
                                                       valid=self.im_fossil_fuel.coeff_validation['csl'])
            self.benchmark_HSL_f.benchmark(plot=False)
            self.benchmark_HCP_f.benchmark(plot=False)
            self.benchmark_BASE_f.benchmark(plot=False)
            self.benchmark_CCP_f.benchmark(plot=False)
            self.benchmark_CSL_f.benchmark(plot=False)
        else:
            self.benchmark_HSL_f = None
            self.benchmark_HCP_f = None
            self.benchmark_BASE_f = None
            self.benchmark_CCP_f = None
            self.benchmark_CSL_f = None

        # Plot benchmark html sections
        self.benchmarking_bar_hsl_e_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_HSL_e)
        self.benchmarking_bar_hcp_e_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_HCP_e)
        self.benchmarking_bar_base_e_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_BASE_e)
        self.benchmarking_bar_ccp_e_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_CCP_e)
        self.benchmarking_bar_csl_e_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_CSL_e)

        self.benchmarking_bar_hsl_f_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_HSL_f)
        self.benchmarking_bar_hcp_f_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_HCP_f)
        self.benchmarking_bar_base_f_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_BASE_f)
        self.benchmarking_bar_ccp_f_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_CCP_f)
        self.benchmarking_bar_csl_f_html = benchmark.Benchmark.generate_benchmark_bar_html(self.benchmark_CSL_f)
Example #22
import six
import werkzeug
from flask import Flask, request, jsonify
from flask_cors import CORS
from werkzeug.utils import secure_filename

import benchmark
import libraries
from web import charts

app = Flask(__name__, static_folder=None)
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 2
CORS(app)  # todo: fix to receive only from https://jsonperf.com

BNCH = benchmark.Benchmark(
    sorted(libraries.instantiate_libraries(), key=lambda l: l.name))


def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ['json', 'txt']


@app.route("/python3", methods=["POST"])
@app.route("/python2", methods=["POST"])
def test():
    user_file = request.files.get(
        'file')  # type: werkzeug.datastructures.FileStorage
    if not user_file or user_file.filename == '':
        return 'file is missing', 400
Example #23
                    if t_type == "and_title":
                        and_id = "%s#%d" % (docid, i)
                        omim_title[and_id] = t
                        TITLES.write("%s\t%s\t#%s\t%s\n" %
                                     (and_id, t_type, i, t))
                        matches = m.tag_text(and_id, t, t_type)
                        for te in matches:
                            MATCHES.write("%s\t%s\t%s\n" % (and_id, t, te))
                        i += 1
            #m.tag_text(docid, text, "text")
    TITLES.close()
    MATCHES.close()

    raw_mapping = m.get_mapping()

    precision, recall, f1 = benchmark.Benchmark(
        b, "omim_benchmark.tsv").get_performance(raw_mapping)
    sys.stderr.write("Precision: %s Recall: %s F1: %s\n" %
                     (precision, recall, f1))
    #sys.exit()

    for docid in raw_mapping.iterkeys():
        data = raw_mapping[docid]
        if (data.score == None):
            print "%s\t%s\t%s\t%s" % (docid, data.entity, "|".join(
                data.synonyms), omim_title[docid])
        else:
            print "%f\t%s\t%s\t%s\t%s" % (data.score, docid, data.entity,
                                          "|".join(data.synonyms),
                                          omim_title[docid])
Example #24
if __name__ == '__main__':
    # Add benchmark module to path
    cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
    os.sys.path.insert(
        1,
        os.path.join(cwd, '..', '..', '..', 'utils', 'benchmarks',
                     'tensorflow'))
    import benchmark  # noqa: E402
    logging.basicConfig(
        level=logging.getLevelName("DEBUG"),
        format='%(asctime)s %(name)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    module = benchmark.Benchmark(graph_builder, inputs,
                                 tf.global_variables_initializer, add_args,
                                 iteration_report)

    opts = benchmark.parse_opts(module, False)
    np.random.seed(opts.random_seed)

    if opts.shards > 1:
        raise NotImplementedError(
            "--shards option has not been implemented with this example")
    if opts.replicas > 1:
        raise NotImplementedError(
            "--replicas option has not been implemented with this example")

    print(
        f" Dynamic Sparse Transformer Feed-forward Layer {'Train' if opts.train else 'Inference'} Synthetic benchmark.\n"
        f" Batch size {opts.batch_size}.\n"
Example #25
import benchmark
import config_hnsw as hnsw

algo_field_names = ("M", "efConstruction")
algo_field_types = ("INTEGER", "INTEGER")
case_field_names = ("efSearch", )
case_field_types = ("INTEGER", )

# Create a benchmark parameterized by the HNSW index fields (M, efConstruction)
# and the per-case field (efSearch).
bench = benchmark.Benchmark(algo_field_names, algo_field_types,
                            case_field_names, case_field_types)

for M in hnsw.Ms:
    for efConstruction in hnsw.efConstructions:
        algo_fields = (M, efConstruction)
        key = "HNSW%d-%d" % algo_fields
        parameters = "efConstruction=%d" % efConstruction
        case_fields = []
        for efSearch in hnsw.efSearchs:
            case_fields.append((efSearch, ))
        bench.run(key, parameters, algo_fields, case_fields)
Example #26
                        help="Layer hidden size")
    parser.set_defaults(batches_per_step=1000, steps=5, shards=2)
    return parser


def iteration_report(opts, time):
    return "{:5f} items/sec".format(opts.batch_size * opts.batches_per_step / time)


if __name__ == '__main__':
    sys.path.insert(1, '../../../utils/benchmarks/popart')
    import benchmark

    module = benchmark.Benchmark(
        graph_builder,
        add_args,
        iteration_report
    )

    opts = benchmark.parse_opts(module)

    opts.train = opts.mode == "train"

    # Log Benchmark Message
    print("Popart Multi-IPU {} Synthetic benchmark.\n"
          " Batch size {}.\n"
          " Batches per Step {}.\n"
          " Steps {}.\n"
          " {} IPUs."
          .format(
              {"infer": "Inference", "eval": "Evaluation", "train": "Training"}[opts.mode],
Example #27
ArrayRenderer independent of display """

import time

import numpy as np

from punyty.vector import Vector3
from punyty.objects import Cube
from punyty.renderers import ArrayRenderer
from punyty.scene import Scene

import benchmark

if __name__ == '__main__':
    width = 800
    height = 800
    target_array = np.zeros((height, width, 3))
    scene = Scene()
    cube = Cube()
    scene.add_object(cube)
    renderer = ArrayRenderer(target_array=target_array)

    bench = benchmark.Benchmark(renderer)
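
    # Render loop: rotate the cube each frame and print the FPS value whenever
    # bench.update() reports one.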

    while True:
        t = time.time()
        renderer.render(scene)
        cube.rotate(Vector3(time.time(), 0, 0))
        fps = bench.update(t)
        if fps:
            print(fps)
Example #28
def iteration_report(opts, time):
    return "{:5f} items/sec".format(opts.batch_size * opts.batches_per_step * opts.replicas / time)


if __name__ == '__main__':
    # Add benchmark module to path
    cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
    sys.path.insert(1, os.path.join(cwd, '..', '..', '..', 'utils',
                                    'benchmarks', 'tensorflow'))
    import benchmark

    module = benchmark.Benchmark(
        graph_builder,
        inputs,
        initializer,
        add_args,
        iteration_report
    )

    options = benchmark.parse_opts(module, False)

    if options.shards > 1:
        raise NotImplementedError(
            "--shards option has not been implemented with this example")

    # Log Benchmark Message
    print("Multi-layer LSTM with a dense final layer, {} Benchmark.\n"
          " Batch size {}.\n"
          " Batches per Step {}.\n"
          " Steps {}.\n"
Example #29
from examples import search
import benchmark as bm
import cProfile as profile
from functools import wraps


def test_func():
    for i in range(100):
        x = i**3


if __name__ == '__main__':
    benchmark = bm.Benchmark()
    benchmark.set_funcs(search.search1, search.search2, search.search3)
    benchmark.set_inputs(search.search_inputs, search.search_inputs2)
    benchmark.compare(10)

    # compile() needs source text rather than a function object, so compile a call to test_func.
    code = compile('test_func()', '<string>', 'exec')
    profile.run(code)