Example #1
def bench_pyflake(loops, filename):
    input_fp = open(filename, 'rb')
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        input_fp.seek(0)
        field = RBitfield(input_fp)

        magic = field.readbits(16)
        if magic == 0x1f8b:  # GZip
            out = gzip_main(field)
        elif magic == 0x425a:  # BZip2
            out = bzip2_main(field)
        else:
            raise Exception("Unknown file magic %x, not a gzip/bzip2 file"
                            % hex(magic))

    dt = perf.perf_counter() - t0
    input_fp.close()

    if hashlib.md5(out).hexdigest() != "afa004a630fe072901b1d9628b960974":
        raise Exception("MD5 checksum mismatch")

    return dt
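Every snippet in this collection follows the same contract: the function takes a loop count, times the hot section with perf.perf_counter(), and returns the elapsed seconds. A minimal sketch of how such a function is typically driven, assuming the perf/pyperf runner API; the benchmark name and input path below are placeholders, not from the original:

import perf

runner = perf.Runner()
# bench_pyflake receives the loop count first, then the extra argument;
# 'interpreter.tar.bz2' is a placeholder input path.
runner.bench_time_func('pyflate', bench_pyflake, 'interpreter.tar.bz2')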
Example #2
def bench_pyflake(loops, filename):
    input_fp = open(filename, 'rb')
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        input_fp.seek(0)
        field = RBitfield(input_fp)

        magic = field.readbits(16)
        if magic == 0x1f8b:  # GZip
            out = gzip_main(field)
        elif magic == 0x425a:  # BZip2
            out = bzip2_main(field)
        else:
            raise Exception("Unknown file magic %x, not a gzip/bzip2 file" %
                            hex(magic))

    dt = perf.perf_counter() - t0
    input_fp.close()

    if hashlib.md5(out).hexdigest() != "afa004a630fe072901b1d9628b960974":
        raise Exception("MD5 checksum mismatch")

    return dt
Example #3
def _test(spawn, sleep, options):
    global counter
    counter = 0
    before_spawn = perf.perf_counter()
    for _ in xrange(N):
        spawn(incr, sleep, **options.kwargs)

    before_sleep = perf.perf_counter()
    if options.sleep:
        assert counter == 0, counter
        sleep(0)
        after_sleep = perf.perf_counter()
        assert counter == N, (counter, N)
    else:
        after_sleep = before_sleep

    if options.join:
        before_join = perf.perf_counter()
        options.join()
        after_join = perf.perf_counter()
        join_duration = after_join - before_join
    else:
        join_duration = -1

    return Times(before_sleep - before_spawn,
                 after_sleep - before_sleep,
                 join_duration)
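This snippet leans on module-level scaffolding that is not shown: the shared counter, the spawned incr task, the constant N, and a Times result tuple. A plausible stand-in, for illustration only:

from collections import namedtuple

N = 10000
counter = 0
Times = namedtuple('Times', 'spawn_duration sleep_duration join_duration')

def incr(sleep, **kwargs):
    # Each spawned task bumps the shared counter exactly once.
    global counter
    counter += 1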
Example #4
def bench_pickle(loops, pickle, options):
    range_it = xrange(loops)

    # micro-optimization: use fast local variables
    dumps = pickle.dumps
    objs = (DICT, TUPLE, DICT_GROUP)
    protocol = options.protocol
    t0 = perf.perf_counter()

    for _ in range_it:
        for obj in objs:
            # 20 dumps
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)

    return perf.perf_counter() - t0
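The 20 identical calls are deliberate loop unrolling, so per-iteration loop overhead does not dominate the cheap dumps() calls. DICT, TUPLE and DICT_GROUP are module-level fixtures that are not shown; small stand-ins like these (much smaller than the real benchmark data) are enough to make the function runnable:

DICT = dict((str(i), i) for i in range(100))  # the real fixture is larger
TUPLE = tuple(range(100))
DICT_GROUP = [DICT.copy() for _ in range(3)]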
Example #5
def test_calls(loops):
    a = Foo()
    b = Bar()
    c = Baz()
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # 18 calls
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)

    return perf.perf_counter() - t0
Example #6
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        cur = Person.delete()
        cur.execute()

        cur = Address.delete()
        cur.execute()

        # Run the benchmark once
        t0 = perf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person.insert()
            new_person.execute(name="name %i" % i)

            # Insert an Address in the address table
            new_address = Address.insert()
            new_address.execute(post_code='%05i' % i)

        # run one SELECT query per inserted person
        for i in xrange(npeople):
            cur = Person.select()
            cur.execute()

        total_dt += (perf.perf_counter() - t0)

    return total_dt
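Person and Address behave here like SQLAlchemy Core Table objects with bound metadata (the old implicit-execution style, where table.insert().execute(...) needs no explicit connection). A rough sketch of that setup, assuming SQLAlchemy 1.x and an in-memory SQLite database:

from sqlalchemy import (MetaData, Table, Column, Integer, String,
                        create_engine)

engine = create_engine('sqlite://')
metadata = MetaData(bind=engine)  # bound metadata enables implicit .execute()

Person = Table('person', metadata,
               Column('id', Integer, primary_key=True),
               Column('name', String(250)))
Address = Table('address', metadata,
                Column('id', Integer, primary_key=True),
                Column('post_code', String(20)))
metadata.create_all()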
Example #7
def bench_formatted_output(loops, logger, stream):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    fmt = FORMAT
    msg = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)

    dt = perf.perf_counter() - t0

    lines = stream.getvalue().splitlines()
    if len(lines) != loops * 10:
        raise ValueError("wrong number of lines")

    return dt
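The logging examples assume a logger wired to an in-memory stream, plus truncate_stream and the FORMAT/MESSAGE constants. One plausible arrangement using only the standard library (the constant values are stand-ins):

import io
import logging

FORMAT = 'important: %s'
MESSAGE = 'some important information to be logged'

stream = io.StringIO()
handler = logging.StreamHandler(stream)
logger = logging.getLogger('bench_logger')
logger.propagate = False
logger.addHandler(handler)
logger.setLevel(logging.WARNING)  # so debug() stays silent, warning() prints

def truncate_stream(stream):
    # Drop everything written so far so each run starts empty.
    stream.seek(0)
    stream.truncate()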
Example #8
def bench_simple_output(loops, logger, stream):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    m = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)
        logger.warn(m)

    dt = perf.perf_counter() - t0

    lines = stream.getvalue().splitlines()
    if len(lines) != loops * 10:
        raise ValueError("wrong number of lines")

    return dt
Example #9
def bench_silent(loops, logger, stream):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    m = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)

    dt = perf.perf_counter() - t0

    if len(stream.getvalue()) != 0:
        raise ValueError("stream is expected to be empty")

    return dt
Example #10
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        cur = Person.delete()
        cur.execute()

        cur = Address.delete()
        cur.execute()

        # Run the benchmark once
        t0 = perf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person.insert()
            new_person.execute(name="name %i" % i)

            # Insert an Address in the address table
            new_address = Address.insert()
            new_address.execute(post_code='%05i' % i)

        # run one SELECT query per inserted person
        for i in xrange(npeople):
            cur = Person.select()
            cur.execute()

        total_dt += (perf.perf_counter() - t0)

    return total_dt
Example #11
def test_calls(loops):
    f = Foo()
    if hasattr(f, '__dict__'):
        raise Exception("f has a __dict__ attribute!")
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # 20 calls
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)

    return perf.perf_counter() - t0
Example #12
def main(loops, level):
    board, solution = LEVELS[level]
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    stream = StringIO()

    board = board.strip()
    expected = solution.rstrip()

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        stream = StringIO()
        solve_file(board, strategy, order, stream)
        output = stream.getvalue()
        stream = None

    dt = perf.perf_counter() - t0

    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s"
                             % (output, expected))

    return dt
Example #13
def bench_etree(iterations, etree, bench_func):
    xml_root = build_xml_tree(etree)
    xml_data = etree.tostring(xml_root)

    # not using NamedTemporaryFile() here as re-opening it is not portable
    tf, file_path = tempfile.mkstemp()
    try:
        etree.ElementTree(xml_root).write(file_path)

        t0 = perf.perf_counter()

        for _ in xrange(iterations):
            bench_func(etree, file_path, xml_data, xml_root)

        dt = perf.perf_counter() - t0
    finally:
        try:
            os.close(tf)
        except EnvironmentError:
            pass
        try:
            os.unlink(file_path)
        except EnvironmentError:
            pass

    return dt
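bench_func is one of several callbacks sharing the signature (etree, file_path, xml_data, xml_root); a trivial example that just re-parses the serialized document (the name bench_parse is illustrative):

def bench_parse(etree, file_path, xml_data, xml_root):
    # Re-parse the serialized tree; the result is intentionally discarded.
    etree.fromstring(xml_data)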
Example #14
    def test(self, loops):

        s = u''.join(map(unichr, range(20)))
        t = u''.join(map(unichr, range(100)))
        u = u''.join(map(unichr, range(500)))
        v = u''.join(map(unichr, range(1000)))
        range_it = xrange(loops)
        t0 = perf.perf_counter()

        for _ in range_it:

            s.lower()
            s.lower()
            s.lower()
            s.lower()
            s.lower()

            s.upper()
            s.upper()
            s.upper()
            s.upper()
            s.upper()

            s.title()
            s.title()
            s.title()
            s.title()
            s.title()

            t.lower()
            t.lower()
            t.lower()
            t.lower()

            t.upper()
            t.upper()
            t.upper()
            t.upper()

            t.title()
            t.title()
            t.title()
            t.title()

            u.lower()
            u.lower()

            u.upper()
            u.upper()

            u.title()
            u.title()

            v.lower()

            v.upper()

            v.title()

        return perf.perf_counter() - t0
Example #15
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        session.query(Person).delete(synchronize_session=False)
        session.query(Address).delete(synchronize_session=False)

        # Run the benchmark once
        t0 = perf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person(name="name %i" % i)
            session.add(new_person)
            session.commit()

            # Insert an Address in the address table
            new_address = Address(post_code='%05i' % i, person=new_person)
            session.add(new_address)
            session.commit()

        # run one SELECT query per inserted person
        for i in xrange(npeople):
            session.query(Person).all()

        total_dt += (perf.perf_counter() - t0)

    return total_dt
Example #16
    def test(self, loops):

        error = ValueError
        be = self.BlockExceptions()
        range_it = xrange(loops)
        t0 = perf.perf_counter()

        for _ in range_it:
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")

        return perf.perf_counter() - t0
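self.BlockExceptions is not shown; given how it is used, it is presumably a reusable context manager that swallows the exception raised in its body, along these lines:

class BlockExceptions(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Returning True suppresses the exception raised in the block.
        return True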
Example #17
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        session.query(Person).delete(synchronize_session=False)
        session.query(Address).delete(synchronize_session=False)

        # Run the benchmark once
        t0 = perf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person(name="name %i" % i)
            session.add(new_person)
            session.commit()

            # Insert an Address in the address table
            new_address = Address(post_code='%05i' % i, person=new_person)
            session.add(new_address)
            session.commit()

        # run one SELECT query per inserted person
        for i in xrange(npeople):
            session.query(Person).all()

        total_dt += (perf.perf_counter() - t0)

    return total_dt
Example #18
def bench_logging_silent(loops, logger, stream):
    # micro-optimization: use fast local variables
    m = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)

    dt = perf.perf_counter() - t0

    if stream.getvalue():
        raise ValueError("stream is expected to be empty")

    return dt
Example #19
def bench_pathlib(loops, tmp_path):
    base_path = pathlib.Path(tmp_path)

    # Warm up the filesystem cache and keep some objects in memory.
    path_objects = list(base_path.iterdir())
    # FIXME: does this code really cache anything?
    for p in path_objects:
        p.stat()
    assert len(path_objects) == NUM_FILES, len(path_objects)

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # Do something simple with each path.
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()

    return perf.perf_counter() - t0
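NUM_FILES and the pre-populated tmp_path come from setup code that is not shown; a minimal stand-in (the constant and the file names are placeholders):

import os
import tempfile

NUM_FILES = 20  # placeholder for the benchmark's real constant

tmp_path = tempfile.mkdtemp()
for i in range(NUM_FILES):
    # .py suffix so both iterdir() and glob("*.py") see every file
    with open(os.path.join(tmp_path, '%03d.py' % i), 'w') as fp:
        fp.write('pass\n')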
Example #20
def bench_formatted_output(loops, logger, stream, check):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    fmt = FORMAT
    msg = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)
        logger.warn(fmt, msg)

    dt = perf.perf_counter() - t0

    if check:
        lines = stream.getvalue().splitlines()
        if len(lines) != loops * 10:
            raise ValueError("wrong number of lines")

    return dt
Example #21
def test_calls(loops):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # 20 calls
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)

    return perf.perf_counter() - t0
Example #22
def bench_pickle(loops, pickle, options):
    range_it = xrange(loops)

    # micro-optimization: use fast local variables
    dumps = pickle.dumps
    objs = (DICT, TUPLE, DICT_GROUP)
    protocol = options.protocol
    t0 = perf.perf_counter()

    for _ in range_it:
        for obj in objs:
            # 20 dumps
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)

    return perf.perf_counter() - t0
Example #23
def bench_regex_effbot(loops):
    if bench_regex_effbot.data is None:
        bench_regex_effbot.data = init_benchmarks()
    data = bench_regex_effbot.data

    range_it = xrange(loops)
    search = re.search
    t0 = perf.perf_counter()

    for _ in range_it:
        # Runs all of the benchmarks for a given value of n.
        for regex, string in data:
            # search 10 times
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)

    return perf.perf_counter() - t0
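Because the dataset is cached on a function attribute, that attribute has to exist before the first call; presumably the module initializes it once after the definition:

bench_regex_effbot.data = None  # filled in lazily on the first call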
Example #24
def bench_etree(iterations, etree, bench_func):
    xml_root = build_xml_tree(etree)
    xml_data = etree.tostring(xml_root)

    # not using NamedTemporaryFile() here as re-opening it is not portable
    tf, file_path = tempfile.mkstemp()
    try:
        etree.ElementTree(xml_root).write(file_path)

        t0 = perf.perf_counter()

        for _ in xrange(iterations):
            bench_func(etree, file_path, xml_data, xml_root)

        dt = perf.perf_counter() - t0
    finally:
        try:
            os.close(tf)
        except EnvironmentError:
            pass
        try:
            os.unlink(file_path)
        except EnvironmentError:
            pass

    return dt
Example #25
def main(loops, level):
    board, solution = LEVELS[level]
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    stream = StringIO()

    board = board.strip()
    expected = solution.rstrip()

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        stream = StringIO()
        solve_file(board, strategy, order, stream)
        output = stream.getvalue()
        stream = None

    dt = perf.perf_counter() - t0

    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s" %
                             (output, expected))

    return dt
Example #26
def bench_regex_effbot(loops):
    if bench_regex_effbot.data is None:
        bench_regex_effbot.data = init_benchmarks()
    data = bench_regex_effbot.data

    range_it = xrange(loops)
    search = re.search
    t0 = perf.perf_counter()

    for _ in range_it:
        # Runs all of the benchmarks for a given value of n.
        for regex, string in data:
            # search 10 times
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)

    return perf.perf_counter() - t0
Example #27
def bench_pathlib(loops, tmp_path):
    base_path = pathlib.Path(tmp_path)

    # Warm up the filesystem cache and keep some objects in memory.
    path_objects = list(base_path.iterdir())
    # FIXME: does this code really cache anything?
    for p in path_objects:
        p.stat()
    assert len(path_objects) == NUM_FILES, len(path_objects)

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # Do something simple with each path.
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()

    return perf.perf_counter() - t0
Example #28
def bench_silent(loops, logger, stream, check):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    m = MESSAGE
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)

    dt = perf.perf_counter() - t0

    if check and len(stream.getvalue()) != 0:
        raise ValueError("stream is expected to be empty")

    return dt
Example #29
def test_calls(loops):
    f = Foo()
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        # 20 calls
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)

    return perf.perf_counter() - t0
Example #30
def _test(spawn, sleep, options):
    global counter
    counter = 0
    before_spawn = perf.perf_counter()
    for _ in xrange(N):
        spawn(incr, sleep, **options.kwargs)

    before_sleep = perf.perf_counter()
    if options.sleep:
        assert counter == 0, counter
        sleep(0)
        after_sleep = perf.perf_counter()
        assert counter == N, (counter, N)
    else:
        after_sleep = before_sleep

    if options.join:
        before_join = perf.perf_counter()
        options.join()
        after_join = perf.perf_counter()
        join_duration = after_join - before_join
    else:
        join_duration = -1

    return Times(before_sleep - before_spawn, after_sleep - before_sleep,
                 join_duration)
Example #31
def _sendall(loops, conn, data):
    start = perf.perf_counter()
    for __ in range(loops):
        for _ in range(N):
            conn.sendall(data)
    taken = perf.perf_counter() - start
    conn.close()
    return taken
Example #32
def bench_MonteCarlo(loops, Num_samples):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        MonteCarlo(Num_samples)

    return perf.perf_counter() - t0
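MonteCarlo itself is the classic SciMark pi estimator. A compact version for reference, with the stdlib random module substituted for SciMark's own Random class:

import random

def MonteCarlo(Num_samples):
    # Estimate pi by sampling points in the unit square and counting
    # how many fall inside the quarter circle.
    rnd = random.random
    under_curve = 0
    for _ in range(Num_samples):
        x = rnd()
        y = rnd()
        if x * x + y * y <= 1.0:
            under_curve += 1
    return under_curve / float(Num_samples) * 4.0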
Example #33
def bench_MonteCarlo(loops, Num_samples):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        MonteCarlo(Num_samples)

    return perf.perf_counter() - t0
Example #34
def bench_MonteCarlo(loops, Num_samples):
    range_it = xrange(loops)
    # print before starting the clock so the I/O is not timed
    print('call bench MonteCarlo(%d, %d)' % (loops, Num_samples))
    t0 = perf.perf_counter()

    for _ in range_it:
        MonteCarlo(Num_samples)

    return perf.perf_counter() - t0
Example #35
def bench_SOR(loops, n, cycles, Array):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        G = Array(n, n)
        SOR_execute(1.25, G, cycles, Array)

    return perf.perf_counter() - t0
Example #36
def bench_SOR(loops, n, cycles, Array):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        G = Array(n, n)
        SOR_execute(1.25, G, cycles, Array)

    return perf.perf_counter() - t0
Example #37
def test_iterative_count(loops, num_threads):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        for _ in xrange(num_threads):
            count()

    return perf.perf_counter() - t0
Example #38
def bench_startup(loops, command):
    run = subprocess.check_call
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        run(command)

    return perf.perf_counter() - t0
Example #39
def bench_decompress(loops):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        zlib.decompress(COMPRESSED)

    dt = perf.perf_counter() - t0

    return dt
Example #40
def bench_compress(loops):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        zlib.compress(DATA)

    dt = perf.perf_counter() - t0

    return dt
Example #41
def template(loops, n):
    range_it = range(loops)
    range_x = range(n)

    t0 = perf.perf_counter()

    for _ in range_it:
        set(range_x)

    return perf.perf_counter() - t0
Example #42
def emit_warning(loops):
    warn_func = warnings.warn
    category = Warning
    range_it = xrange(loops)

    start_time = perf.perf_counter()
    for _ in range_it:
        warn_func('test', category)
    dt = perf.perf_counter() - start_time
    return dt
Example #43
def bench_SOR(loops, n, cycles, Array):
    range_it = xrange(loops)
    # print before starting the clock so the I/O is not timed
    print('call bench SOR(%d, %d, %d)' % (loops, n, cycles))
    t0 = perf.perf_counter()

    for _ in range_it:
        G = Array(n, n)
        SOR_execute(1.25, G, cycles, Array)

    return perf.perf_counter() - t0
Example #44
def bench_apply(loops):
    pool = ThreadPool(1)
    t0 = perf.perf_counter()

    for _ in xrange(loops):
        for _ in xrange(N):
            pool.apply(noop)

    pool.join()
    pool.kill()
    return perf.perf_counter() - t0
Example #45
def _map(pool, pool_func, loops):
    data = [1] * N
    t0 = perf.perf_counter()

    # Must collect for imap to finish
    for _ in xrange(loops):
        list(pool_func(identity, data))

    pool.join()
    pool.kill()
    return perf.perf_counter() - t0
Example #46
def bench_regex_compile(loops, regexes):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        for regex, flags in regexes:
            re.purge()
            # ignore result (compiled regex)
            re.compile(regex, flags)

    return perf.perf_counter() - t0
Example #47
def bench_LU(cycles, N):
    rnd = Random(7)
    A = rnd.RandomMatrix(ArrayList(N, N))
    lu = ArrayList(N, N)
    pivot = array('i', [0]) * N
    range_it = xrange(cycles)
    t0 = perf.perf_counter()

    for _ in range_it:
        LU(lu, A, pivot)

    return perf.perf_counter() - t0
Example #48
def bench_genshi(loops, tmpl_cls, tmpl_str):
    tmpl = tmpl_cls(tmpl_str)
    table = [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10)
             for _ in range(1000)]
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        stream = tmpl.generate(table=table)
        stream.render()

    return perf.perf_counter() - t0
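tmpl_cls and tmpl_str are supplied by the harness; for Genshi text templates they would plausibly look like this (the template body is illustrative, not the benchmark's real one):

from genshi.template import NewTextTemplate

tmpl_str = "{% for row in table %}${row['a']},${row['j']}\n{% end %}"
tmpl = NewTextTemplate(tmpl_str)
print(tmpl.generate(table=[dict(a=1, j=10)]).render())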
Example #49
def _bench_spawn(module, loops, close_fds=True):
    total = 0
    for _ in range(loops):
        t0 = perf.perf_counter()
        procs = [module.Popen('/usr/bin/true', close_fds=close_fds)
                 for _ in range(N)]
        t1 = perf.perf_counter()
        for p in procs:
            p.communicate()
            p.poll()
        total += (t1 - t0)
    return total
Example #50
def bench_regex_dna(loops, seq, expected_res):
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        res = run_benchmarks(seq)

    dt = perf.perf_counter() - t0
    if (expected_res is not None) and (res != expected_res):
        raise Exception("run_benchmarks() error")

    return dt
Example #51
def SparseCompRow_matmult(M, y, val, row, col, x, num_iterations):
    range_it = xrange(num_iterations)
    t0 = perf.perf_counter()

    for _ in range_it:
        for r in xrange(M):
            sa = 0.0
            for i in xrange(row[r], row[r + 1]):
                sa += x[col[i]] * val[i]
            y[r] = sa

    return perf.perf_counter() - t0
Example #52
def bench_telco(loops, filename):
    getcontext().rounding = ROUND_DOWN
    rates = list(map(Decimal, ('0.0013', '0.00894')))
    twodig = Decimal('0.01')
    Banker = Context(rounding=ROUND_HALF_EVEN)
    basictax = Decimal("0.0675")
    disttax = Decimal("0.0341")

    with open(filename, "rb") as infil:
        data = infil.read()

    infil = io.BytesIO(data)
    outfil = six.StringIO()

    start = perf.perf_counter()
    for _ in range(loops):
        infil.seek(0)

        sumT = Decimal("0")   # sum of total prices
        sumB = Decimal("0")   # sum of basic tax
        sumD = Decimal("0")   # sum of 'distance' tax

        for i in xrange(5000):
            datum = infil.read(8)
            if not datum:
                break
            n, = unpack('>Q', datum)

            calltype = n & 1
            r = rates[calltype]

            p = Banker.quantize(r * n, twodig)

            b = p * basictax
            b = b.quantize(twodig)
            sumB += b

            t = p + b

            if calltype:
                d = p * disttax
                d = d.quantize(twodig)
                sumD += d
                t += d

            sumT += t
            print(t, file=outfil)

        outfil.seek(0)
        outfil.truncate()

    return perf.perf_counter() - start
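The input file is a flat stream of 8-byte big-endian unsigned integers (the '>Q' format). A throwaway input for experimenting with the function can be generated like so (the filename is a placeholder):

import random
from struct import pack

with open('telco-bench.b', 'wb') as f:
    for _ in range(5000):
        f.write(pack('>Q', random.randrange(10 ** 9)))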
Example #53
def bench_FFT(loops, N, cycles):
    twoN = 2 * N
    init_vec = Random(7).RandomVector(twoN)
    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        x = copy_vector(init_vec)
        for i in xrange(cycles):
            FFT_transform(twoN, x)
            FFT_inverse(twoN, x)

    return perf.perf_counter() - t0
Example #54
def bench_spawn_wait(loops):
    pool = ThreadPool(1)

    t0 = perf.perf_counter()

    for _ in xrange(loops):
        for _ in xrange(N):
            r = pool.spawn(noop)
            r.get()

    pool.join()
    pool.kill()
    return perf.perf_counter() - t0
Example #55
def bench_nbody(loops, reference, iterations):
    # Set up global state
    offset_momentum(BODIES[reference])

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        report_energy()
        advance(0.01, iterations)
        report_energy()

    return perf.perf_counter() - t0
Example #56
def benchmark(conn, data):

    spent_total = 0

    for _ in range(N):
        start = perf.perf_counter()
        conn.sendall(data)
        spent = perf.perf_counter() - start
        spent_total += spent

    runs.append(spent_total)
    return spent_total
Example #57
def bench_wait_func_ready():
    from gevent.hub import wait
    class ToWatch(object):
        def rawlink(self, cb):
            cb(self)

    watched_objects = [ToWatch() for _ in range(N)]

    t0 = perf_counter()

    wait(watched_objects)

    return perf_counter() - t0
Example #58
def _sendto(loops, conn, data, to_send=None):
    addr = ('127.0.0.1', 55678)
    spent_total = 0
    to_send = len(data) if to_send is None else to_send
    for __ in range(loops):
        for _ in range(N):
            # reset the byte counter for each batch; otherwise only the
            # first batch would actually send anything
            sent = 0
            start = perf.perf_counter()
            while sent < to_send:
                sent += conn.sendto(data, 0, addr)
            spent = perf.perf_counter() - start
            spent_total += spent

    return spent_total
Example #59
def bench_mdp(loops):
    expected = 0.89873589887
    max_diff = 1e-6
    range_it = xrange(loops)

    t0 = perf.perf_counter()
    for _ in range_it:
        result = Battle().evaluate(0.192)
    dt = perf.perf_counter() - t0

    if abs(result - expected) > max_diff:
        raise Exception("invalid result: got %s, expected %s "
                        "(diff: %s, max diff: %s)"
                        % (result, expected, result - expected, max_diff))
    return dt