Example #1
class Experiments:
    def __init__(self, cutoff: int):
        self.benchmarker = Benchmarker()
        self.SQL = SQL_Interface()
        self.timestamp = datetime.fromtimestamp(time.time())
        self.cold_cutoff = cutoff  # get from sql when done testing local
        self.avg_exe_time = 0.4  # get from sql

    def run(
        self,
        fux_name: str,
        command: str,
        iterations: int,
        accuracy: int,
        extra=None,
    ):
        self.accuracy = accuracy
        self.iter = iterations

        for i in range(iterations):
            time.sleep(self.cold_cutoff)
            data = self.function_call(fux_name, command, extra)

    def function_call(self, name: str, command: str, extra=None):
        # Map names to bound methods (not call results) so that only the
        # requested benchmark is actually executed.
        switcher = {
            'getter': self.benchmarker.request_getter,
            'putter': self.benchmarker.request_putter,
            'feed': self.benchmarker.request_feed_generator,
            'webview': self.benchmarker.request_feed_webview
        }

        func = switcher.get(name)
        return func() if func else None
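
Note that run() passes command and extra into function_call(), yet the dispatch table above never forwards them. If the request_* methods accept those arguments (an assumption; their signatures are not shown in this snippet), a variant using lambdas would defer the calls and pass the arguments through:

# Hypothetical sketch only: it assumes request_getter/request_putter
# accept (command, extra), which this snippet does not show.
def function_call(self, name: str, command: str, extra=None):
    switcher = {
        'getter': lambda: self.benchmarker.request_getter(command, extra),
        'putter': lambda: self.benchmarker.request_putter(command, extra),
    }
    func = switcher.get(name)
    return func() if func else None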
Example #2
def __init__(self, numb_threads):
    self.threads = numb_threads
    self.exp_iterations = 5
    self.SQL = SQL_Interface()
    self.bench = Benchmarker()
    self.uuid = uuid.uuid1()
    self.cold_time_secs = self.SQL.get_coldtime()
    self.test_data_lists = []
Example #3
def __init__(self):
    self.bench = Benchmarker()
    self.sql_interface = SQL_Interface()
    self.exp_iterations = 5
    self.num_items_to_get = 1
    self.cold_time_secs = self.sql_interface.get_coldtime()
    self.uuid = uuid.uuid1()
    self.test_datas = []
Example #4
def __init__(self, fux, filename, interval, offset, accuracy: float):
    self.lambda_function = fux
    self.fux_id = self.get_function_id(fux)
    self.filename = filename
    self.interval = interval
    self.offset = offset
    self.accuracy = accuracy
    self.bench = Benchmarker()
    self.SQL = SQL_Interface()
    self.uuid = uuid.uuid1()
    self.run()
Example #5
def string_bench():
    """
    Run a benchmark and print the results.
    https://pythonhosted.org/Benchmarker/
    """
    with Benchmarker(1000 * 1000, width=20) as bench:
        s1, s2, s3, s4, s5 = "Haruhi", "Mikuru", "Yuki", "Itsuki", "Kyon"

        @bench('empty-loop')
        def _(bm):
            for i in bm:
                pass

        @bench("string-join")
        def _(bm):
            for i in bm:
                sos = ''.join((s1, s2, s3, s4, s5))

        @bench("string-concat")
        def _(bm):
            for i in bm:
                sos = s1 + s2 + s3 + s4 + s5

        @bench("string-format")
        def _(bm):
            for i in bm:
                sos = '%s%s%s%s%s' % (s1, s2, s3, s4, s5)
Example #6
def run(loop=1000, width=20):
    """
    Main run method
    """
    with Benchmarker(loop * loop, width=width) as bench:
        s1, s2, s3, s4, s5 = "Haruhi", "Mikuru", "Yuki", "Itsuki", "Kyon"

        @bench(None)  # empty loop
        def _(bm):
            for i in bm:
                pass

        @bench("join")
        def _(bm):
            for i in bm:
                sos = ''.join((s1, s2, s3, s4, s5))

        @bench("concat")
        def _(bm):
            for i in bm:
                sos = s1 + s2 + s3 + s4 + s5

        @bench("format")
        def _(bm):
            for i in bm:
                sos = '%s%s%s%s%s' % (s1, s2, s3, s4, s5)
Example #7
def main():
    global in_file
    global out_file
    debug = False
    in_filename = sys.argv[1]
    out_filename = sys.argv[2]
    if len(sys.argv) >= 4:
        debug = True
    in_file = open(in_filename, 'r')
    out_file = open(out_filename, 'w')
    bm = Benchmarker()
    with bm('process'):
        t = get_next_int(in_file)

        results = []
        for step in xrange(int(t)):
            n = get_next_int(in_file)
            inherits = {}
            for i in xrange(n):
                line = get_nexts_int(in_file)
                if line[0] > 0:
                    m, inherit = line[0], line[1:]
                    inherits[i + 1] = inherit
                else:
                    inherits[i + 1] = []
            answer = "No"
            dp = {}
            for i in xrange(n):
                if not i + 1 in dp:
                    b, all = is_diamond_inherits(i + 1, inherits, dp)
                    if b:
                        answer = "Yes"
            results.append(answer)
        print_out_int(results, out_file, debug)
Example #8
def main():
    with Benchmarker(width=1) as bench:

        @bench("fibonacci(34):")
        def _(bm):
            for i in bm:
                print("{0}".format(f(34)))
Example #9
def main():

    times = 1000000
    original = [i for i in range(10000)]

    with Benchmarker(times, width=20) as bm:
        a = list(original)
        b = array('i', original)
        c = deque(original)

        @bm('list')
        def _(bench):
            for i in bench:
                a.insert(0, 1)
                a.pop()

        @bm('array')
        def _(bench):
            for i in bench:
                b.insert(0, 1)
                b.pop()

        @bm('deque')
        def _(bench):
            for i in bench:
                c.append(1)
                c.popleft()
Example #10
class ConcurrentExperiment:
    def __init__(self, numb_threads):
        self.threads = numb_threads
        self.exp_iterations = 5
        self.SQL = SQL_Interface()
        self.bench = Benchmarker()
        self.uuid = uuid.uuid1()
        self.cold_time_secs = self.SQL.get_coldtime()
        self.test_data_lists = []

    def run_experiment(self):

        for i in range(self.exp_iterations):
            # invoke lambdas concurrently
            self.test_data_lists.append(
                self.bench.run_method_concurrently('Getter', {
                    'command': 'get_file_url',
                    'filename': 'red.png'
                }, self.threads))
            # wait for lambdas to be cold
            time.sleep(self.cold_time_secs)

        # add data to db
        for td_list in self.test_data_lists:
            for td in td_list:
                td.description = str(self.uuid) + "|concurrent_experiment"
                self.SQL.insert_test(td)
Example #11
def benchmark(mode, addr, method, loop, params, options, verbose, qsize,
              filter):
    setupLog(verbose)
    mod = getattr(lotrpc, mode)
    cl = mod.Client(addr, json.loads(options))
    arg = json.loads(params)
    q = queue.Queue(qsize)

    with Benchmarker(loop, filter=filter) as bench:

        @bench('sync')
        def callapi(bm):
            try:
                for i in bm:
                    cl.call(method, arg)
            except Exception as e:
                raise Skip("error {}".format(e))

        @bench('thread-pool')
        def tpcall(bm):
            try:
                t = threading.Thread(target=qworker, args=(q, ))
                t.start()
                with ThreadPoolExecutor() as pool:
                    for i in bm:
                        q.put(pool.submit(cl.call, method, arg))
                    q.put(None)
                t.join()
            except Exception as e:
                raise Skip("error {}".format(e))

        @bench('process-pool')
        def ppcall(bm):
            try:
                t = threading.Thread(target=qworker, args=(q, ))
                t.start()
                with ProcessPoolExecutor() as pool:
                    for i in bm:
                        q.put(pool.submit(cl.call, method, arg))
                    q.put(None)
                t.join()
            except Exception as e:
                raise Skip("error {}".format(e))

        @bench('async')
        def asynccall(bm):
            try:
                loop = asyncio.get_event_loop()
                t = threading.Thread(target=qworker2, args=(q, loop))
                t.start()
                for i in bm:
                    q.put(cl.asynccall(loop, method, arg))
                q.put(None)
                t.join()
            except Exception as e:
                raise Skip("error {}".format(e))
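
qworker and qworker2 are referenced above but not included in this snippet. Judging from how the queue is used (futures are pushed with q.put(pool.submit(...)) and the stream is terminated with q.put(None)), a plausible qworker, shown here only as an illustrative sketch rather than the project's actual helper, would drain the queue and wait on each future:

def qworker(q):
    # Consume futures until the None sentinel arrives, blocking on each
    # result so completed calls are accounted for before t.join() returns.
    while True:
        fut = q.get()
        if fut is None:
            break
        fut.result()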
Example #12
class WeakestLinkExperiment:
    def __init__(self):
        self.bench = Benchmarker()
        self.sql_interface = SQL_Interface()
        self.exp_iterations = 5
        self.num_items_to_get = 1
        self.cold_time_secs = self.sql_interface.get_coldtime()
        self.uuid = uuid.uuid1()
        self.test_datas = []

    def run_experiment(self):

        for i in range(self.exp_iterations):

            # initial invoke
            self.test_datas.append(
                self.bench.request_feed_generator(
                    num_items=self.num_items_to_get))

            # wait for lambda to be cold
            time.sleep(self.cold_time_secs)

            # make generator + getter hot
            self.test_datas.append(
                self.bench.request_feed_generator(
                    num_items=self.num_items_to_get))
            # webview lambda will be cold
            self.test_datas.append(
                self.bench.request_feed_webview(
                    num_items=self.num_items_to_get))

            # wait for lambda to be cold
            time.sleep(self.cold_time_secs)

        # save test data in db
        for td in self.test_datas:
            td.description = str(self.uuid) + "|weakest_link_experiment"
            self.sql_interface.insert_test(td)
Example #13
def main():
    global in_file
    global out_file
    debug = False
    in_filename = sys.argv[1]
    out_filename = sys.argv[2]
    if len(sys.argv) >= 4:
        debug = True
    in_file = open(in_filename, 'r')
    out_file = open(out_filename, 'w')
    bm = Benchmarker()
    with bm('process'):
        t = get_next_int(in_file)

        results = []
        for step in xrange(int(t)):
            input = get_nexts_int(in_file)
            n, numbers = input[0], input[1:]
            total = sum(numbers)
            ave = (total * 2) / n
            base = n
            subtotal = (total * 2)
            ave = (subtotal) / base
            new_base = n
            new_subtotal = subtotal
            for i, number in enumerate(numbers):
                if number > ave:
                    new_base -= 1
                    new_subtotal -= number
            if base <= new_base:
                pass
            else:
                base = new_base
                subtotal = new_subtotal
                print base
            print step, n, base, ave
            ave = subtotal / base
            answer = []
            needs = []
            for i, number in enumerate(numbers):

                need = (ave - number) * 100
                if need >= 0:
                    answer.append("%.6f" % (need / total))
                else:
                    answer.append("%.6f" % 0.0)
            results.append(" ".join(answer))
        print_out_int(results, out_file, debug)
Example #14
def run(row, col, loop=3):

    df_random = ppd.DataFrame(np.random.randn(row, col))
    df_random.to_csv("random.csv")

    with Benchmarker(loop) as bench:

        @bench(f'original pandas row:{row} col:{col}')
        def original_pandas_read(bm):
            ppd.read_csv('random.csv')

        @bench(f'modin pandas row:{row} col:{col}')
        def modin_read(bm):
            mpd.read_csv('random.csv')

        @bench(f'dask pandas row:{row} col:{col}')
        def dask_read(bm):
            df = ddf.read_csv('random.csv')
            df = df.compute()
Example #15
def bm():
    with Benchmarker(10000, cycle=10, extra=1) as bench:
        @bench('roll3')
        def insertion(bm):
            for i in bm:
                roll3()

        @bench('roll4')
        def insertion(bm):
            for i in bm:
                roll4()

        @bench('roll3')
        def insertion(bm):
            for i in bm:
                roll3()

        @bench('roll4')
        def insertion(bm):
            for i in bm:
                roll4()
Example #16
def output_similar_strings_of_each_line(path, Database):
    number_of_lines = len(open(path).readlines())

    with Benchmarker(width=20) as bench:
        db = Database(CharacterNgramFeatureExtractor(2))
        @bench("initialize database({0} lines)".format(number_of_lines))
        def _(bm):
            with open(path, 'r') as lines:
                for line in lines:
                    strings = line.rstrip('\r\n')
                    db.add(strings)

        @bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
        def _(bm):
            searcher = Searcher(db, CosineMeasure())
            with open(path, 'r') as lines:
                for i, line in enumerate(lines):
                    if i >= SEARCH_COUNT_LIMIT:
                        break
                    strings = line.rstrip('\r\n')
                    result = searcher.search(strings, 0.8)
Example #17
def main():
  global in_file
  global out_file
  debug = False
  in_filename = sys.argv[1]
  out_filename = sys.argv[2]
  if len(sys.argv)>=4:
    debug = True
  in_file = open(in_filename, 'r')
  out_file = open(out_filename, 'w')
  
  bm = Benchmarker()
  #if 1:
  with bm('process'):
    t = get_next_int(in_file)
    
    results = []
    for step in xrange(int(t)):
      n, x, y = get_nexts_int(in_file)
      answer = solve(n, 0,0,0, x, y,1.0)
      results.append("%0.7f" % answer)
    print_out(results, out_file,debug)
Example #18
def main():
  global in_file
  global out_file
  debug = False
  in_filename = sys.argv[1]
  out_filename = sys.argv[2]
  if len(sys.argv)>=4:
    debug = True
  in_file = open(in_filename, 'r')
  out_file = open(out_filename, 'w')
  bm = Benchmarker()
  with bm('process'):
    t = get_next_int(in_file)

    results = []
    for step in xrange(int(t)):
      input = get_nexts_int(in_file)
      n, numbers = input[0], input[1:]
      answer = solve(n, numbers)
      results.append(answer)
    print results
    print_out_int(results, out_file,debug)
Example #19
def bench_two_arg(arg):
    for funcname in ("join", ):
        if arg is not None and arg != funcname:
            continue
        n = N
        with Benchmarker(n, width=30) as b:

            @b("native.%s" % (funcname))
            def _(bm):
                func = getattr(os.path, funcname)
                for i in bm:
                    func(DIR_PATH, FILE_PATH)
                    func(B_DIR_PATH, B_FILE_PATH)

            @b("rust.%s" % (funcname))
            def _(bm):
                func = getattr(fpath, funcname)
                for i in bm:
                    func(DIR_PATH, FILE_PATH)
                    func(B_DIR_PATH, B_FILE_PATH)

        print("=*=" * 40)
Example #20
def bench_one_arg(arg):
    for funcname in ("abspath", "basename", "dirname", "isabs", "islink",
                     "exists", "lexists", "split", "splitext", "relpath",
                     "normpath", "realpath", "expanduser", "expandvars"):
        if arg is not None and arg != funcname:
            continue
        # benchmark of file system dependent
        n = N if funcname not in ("islink", "lexists", "exists", "realpath",
                                  "relpath") else 50
        with Benchmarker(n, width=30) as b:

            @b("native.%s" % (funcname), tag=funcname)
            def _(bm):
                func = getattr(os.path, funcname)
                for i in bm:
                    func(ABS)
                    func(B_FILE_PATH)
                    func(FILENAME)
                    func(FILE_PATH)
                    func(DIR_PATH)
                    func(SEPEND_DIR_PATH)
                    func(B_EXPAND_PATH)
                    func(EXPAND_PATH)

            @b("rust.%s" % (funcname), tag=funcname)
            def _(bm):
                func = getattr(fpath, funcname)
                for i in bm:
                    func(ABS)
                    func(B_FILE_PATH)
                    func(FILENAME)
                    func(FILE_PATH)
                    func(DIR_PATH)
                    func(SEPEND_DIR_PATH)
                    func(B_EXPAND_PATH)
                    func(EXPAND_PATH)

        print("=*=" * 40)
Example #21
def main():
  global in_file
  global out_file
  debug = False
  in_filename = sys.argv[1]
  out_filename = sys.argv[2]
  if len(sys.argv)>=4:
    debug = True
  in_file = open(in_filename, 'r')
  out_file = open(out_filename, 'w')
  bm = Benchmarker()
  #if 1:
  global dp
  with bm('process'):
    T = get_next_int(in_file)
    
    results = []
    for step in xrange(int(T)):
      a, n = get_nexts_int(in_file)
      motes = get_nexts_int(in_file)
      dp = {}
      answer = solve(a, tuple(sorted(motes)))
      results.append(answer)
    print_out_int(results, out_file,debug)
Example #22
def exebench(width):
    """
    benchorg.jpg is
    'http://upload.wikimedia.org/wikipedia/commons/d/df/SAND_LUE.jpg'
    """
    height = width * 2 / 3
    with Benchmarker(width=30, loop=N) as bm:
        for i in bm('kaa.imlib2'):
            imlib2_scale('benchorg.jpg', width, height)
        for i in bm("PIL"):
            pil_scale('benchorg.jpg', width, height)
        for i in bm("pgmagick(blob-read)"):
            pgmagick_scale_from_blob('benchorg.jpg', width, height)
        for i in bm("pgmagick(normal-read)"):
            pgmagick_scale('benchorg.jpg', width, height)
        for i in bm("pgmagick(scale+sharpen)"):
            pgmagick_scale_plus_sharpen('benchorg.jpg', width, height)
        for i in bm("opencv"):
            opencv_scale('benchorg.jpg', width, height)
        for i in bm("pyimlib2"):
            pyimlib2_scale('benchorg.jpg', width, height)
        for i in bm("pyimlib2_with_pgsharpen"):
            pyimlib2_scale_with_pgmagicksharpen('benchorg.jpg', width, height)
    return bm.results
Example #23
#!/usr/bin/env python

## This script was derived from
## 'examples/phase/anisotropy/input.py'

if __name__ == "__main__":
    
    from fipy.tools.parser import parse

    numberOfElements = parse('--numberOfElements', action = 'store',
                          type = 'int', default = 40)
    from fipy.tools import numerix
    N = int(numerix.sqrt(numberOfElements))

    from benchmarker import Benchmarker
    bench = Benchmarker()

    bench.start()

    Length = N * 2.5 / 100.
    nx = N
    ny = N
    dx = Length / nx
    dy = Length / ny
    radius = Length / 4.
    seedCenter = (Length / 2., Length / 2.)
    initialTemperature = -0.4
    from fipy.meshes.grid2D import Grid2D
    mesh = Grid2D(dx=dx, dy=dy, nx=nx, ny=ny)

    bench.stop('mesh')
Example #24
parser.add_argument('--install', choices=['client', 'server', 'all'], default='all', help='Allows you to only install the server or client software')
parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
parser.add_argument('--next-sort', action='store_true', default=False, help='displays the next value that can be used as a sort value')
parser.add_argument('--max-concurrency', default=256, help='the maximum concurrency that the tests will run at. The query tests will run at this concurrency', type=int)
parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
parser.add_argument('--query-interval', default=5, type=int)
parser.add_argument('--max-threads', default=8, help='The max number of threads to run weight at, this should be set to the number of cores for your system.', type=int)
parser.add_argument('--duration', default=60, help='Time in seconds that each test should run for.')
parser.add_argument('--starting-concurrency', default=8, type=int)
parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
  benchmarker.run_list_tests()
elif benchmarker.next_sort:
  benchmarker.next_sort_value()
elif benchmarker.parse != None:
  benchmarker.parse_timestamp()
else:
  benchmarker.run()
Example #25
    return i


def pivot(a, l, r):
    return a[r]


def random_data(size):
    ary = range(size)
    random.shuffle(ary)
    return ary


if __name__ == '__main__':
    sys.setrecursionlimit(100000)
    with Benchmarker(width=35) as bm:
        ary = random_data(1000)
        with bm('random:size=1000'):
            sort(ary)
        ary = random_data(10000)
        with bm('random:size=10000'):
            sort(ary)
        ary = random_data(100000)
        with bm('random:size=100000'):
            sort(ary)

        ex = random_data(100)
        ary = range(900)
        ary.extend(ex)
        with bm('already sort of 90%:size=1000'):
            sort(ary)
Example #26
# validate_homework()
# train_X, test_X, train_y, test_y = load_mnist()
# train_X_mini = train_X[:1000]
# train_y_mini = train_y[:1000]
# test_X_mini = test_X[:100]
# test_y_mini = test_y[:100]

# norm = np.linalg.norm
# def dist(x, train_X):
#     return [norm(tx - x) for tx in train_X]

# k = 5
# d = [dist(x, train_X_mini) for x in test_X_mini]

# nparray = np.array
# knind = [list(nparray(x).argsort()[:k]) for x in d]
# kny = [list(train_y_mini[x]) for x in knind]
# # county = [[[y, x.count(y)] for y in list(set(x))] for x in kny]
# # pred_y_mini = [nparray(x)[nparray(x)[:,1].argsort()[-1:]][0][0] for x in county]
# import collections
# pred_y_mini = [collections.Counter(x).most_common(1)[0][0] for x in kny]
# print((test_y_mini==pred_y_mini).tolist().count(True))
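
The commented-out block above describes a k-nearest-neighbour prediction: per-sample Euclidean distances, argsort to pick the k closest training points, and a majority vote via collections.Counter. A minimal self-contained sketch of that idea (using tiny synthetic arrays, since load_mnist and validate_homework are not shown here) could look like:

import collections
import numpy as np

def knn_predict(test_X, train_X, train_y, k=5):
    # For each test point: distances to all training points, indices of the
    # k nearest, then the most common label among those neighbours.
    preds = []
    for x in test_X:
        dists = np.linalg.norm(train_X - x, axis=1)
        nearest = dists.argsort()[:k]
        preds.append(collections.Counter(train_y[nearest]).most_common(1)[0][0])
    return np.array(preds)

# Toy data: two clusters labelled 0 and 1.
train_X = np.vstack([np.zeros((10, 2)), np.ones((10, 2))])
train_y = np.array([0] * 10 + [1] * 10)
print(knn_predict(np.array([[0.1, 0.2], [0.9, 0.8]]), train_X, train_y))  # [0 1]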

from benchmarker import Benchmarker
loop = 1000
with Benchmarker(loop, width=20) as bench:

    @bench('default')
    def default_vh(bm):
        validate_homework()
Example #27
from benchmarker import Benchmarker
"""
Objects data benchmarks.
"""

if __name__ == "__main__":
    b = Benchmarker()

    header = "| Test | FPS |\n| --- | --- |\n"
    output = header[:]

    rows = []

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
              passes="_img",
              row="--boxes --images --passes _img"))

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
              passes="_id",
              row="--boxes --images --passes _id"))

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
Example #28
from benchmarker import Benchmarker

import brica1

with Benchmarker(width=20, loop=10000, cycle=20) as bench:
    @bench(None)
    def _(bm):
        for _ in bm:
            pass

    @bench('constant_null_small')
    def _(bm):
        s = brica1.VirtualTimeSyncScheduler(1.0)
        agent = brica1.Agent(s)

        compA = brica1.ConstantComponent()
        compB = brica1.NullComponent()
        mod = brica1.Module()

        mod.add_component('compA', compA)
        mod.add_component('compB', compB)

        compA.make_out_port('out', 1)
        compB.make_in_port('in', 1)

        brica1.connect((compA, 'out'), (compB, 'in'))

        agent.add_submodule('mod', mod)

        for _ in bm:
            agent.step()
Example #29
# coding: utf-8
from benchmarker import Benchmarker
import os
import tempfile
import unicodecsv
import csv
import fcsv
import pandas as pd

NUM = 10 * 1000
source = [["abc", "def", "ghi"], ["jkl", "あいう", "opq"], ["vvv", "v1", "v2"]]
source = (("abc", "def", "ghi"), ("jkl", "あいう", "opq"), ("vvv", "v1", "v2"))

with Benchmarker(NUM, width=40) as bench:

    @bench("std.writerows")
    def b_std_writerows(bm):
        for i in bm:
            with open('b_std.csv', 'w') as out:
                writer = csv.writer(out, quoting=csv.QUOTE_NONNUMERIC)
                writer.writerows(source)

    @bench("std.writerow")
    def b_std_writerow(bm):
        for i in bm:
            with open('b_std.csv', 'w') as out:
                writer = csv.writer(out, quoting=csv.QUOTE_NONNUMERIC)
                writer.writerow(source[0])
                writer.writerow(source[1])
                writer.writerow(source[2])
Example #30
#! /usr/bin/env python
# coding:utf-8

from __future__ import division
import heapq
import bintrees
import random

if __name__ == '__main__':
    from benchmarker import Benchmarker
    from itertools import repeat, izip
    from bintrees import FastRBTree

    # initialize heapq
    h = range(10000)
    heapq.heapify(h)
    # initialize AVLTree
    m = izip(xrange(10000), repeat(True))
    t = FastRBTree(m)

    for bm in Benchmarker(width=20, loop=100000, cycle=3, extra=1):
        for _ in bm.empty():
            pass
        for _ in bm('heapq'):
            heapq.heappop(h)
            heapq.heappush(h, random.randint(-100000, 100000))
        for _ in bm('FastRBTree'):
            t.pop_min()
            t[random.randint(-100000, 100000)] = True
Example #31

# Python
def py_xor(bytes1, bytes2):
    return bytearray([c1 ^ c2 for (c1, c2) in zip(bytes1, bytes2)])


# PyCryptodome
cryptodome_xor = strxor.strxor


def initdata():
    return secrets.token_bytes(2048), secrets.token_bytes(2048)
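
As a quick sanity check (an illustrative addition, not part of the original benchmark), the pure-Python and PyCryptodome implementations should produce identical output on the same random buffers:

b1, b2 = initdata()
assert bytes(py_xor(b1, b2)) == cryptodome_xor(b1, b2)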


with Benchmarker(10000, width=20) as bench:

    @bench('c_xor')
    def _(bm):
        for i in bm:
            b1, b2 = initdata()
            c_xor(b1, b2)

    @bench('py_xor')
    def _(bm):
        for i in bm:
            b1, b2 = initdata()
            py_xor(b1, b2)

    @bench('pycrypto_xor')
    def _(bm):
Example #32
            continue
        actual = func(items)
        if actual != expected:
            import difflib
            for x in difflib.unified_diff(expected,
                                          actual,
                                          'expected',
                                          'actual',
                                          n=2):
                print repr(x)
            sys.exit(1)

##
## do benchmarks
##
for bm in Benchmarker(loop=10000, cycle=1, extra=0):
    for func in benchmark_functions:
        if func._skip:
            bm.skip(func, func._skip)
        else:
            bm.run(func, items)

##
## output example
##
# $ python bench_pycode.py -n 10000 -c 5 -X 1 -q
# ## benchmarker:       release 0.0.0 (for python)
# ## python platform:   darwin [GCC 4.2.1 (Apple Inc. build 5659)]
# ## python version:    2.5.5
# ## python executable: /usr/local/python/2.5.5/bin/python
#
Example #33
File: mesh.py Project: ghorn/Eg

r"""
This example benchmarks the speed and memory usage of creating a mesh. Run:
    
    $ python setup.py efficiency_test
"""
__docformat__ = 'restructuredtext'

if __name__ == "__main__":
        
    from fipy.tools.parser import parse

    from benchmarker import Benchmarker
    bench = Benchmarker()

    numberOfElements = parse('--numberOfElements', action = 'store', type = 'int', default = 100)

    bench.start()

    from fipy.tools import numerix
    nx = int(numerix.sqrt(numberOfElements))
    ny = nx
    dx = 1.
    dy = 1.

    from fipy.meshes.grid2D import Grid2D
    mesh = Grid2D(nx = nx, ny = nx, dx = dx, dy = dy)

    bench.stop('mesh')