Example #1
def test_calls(loops):
    f = Foo()
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # 20 calls
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)

    return pyperf.perf_counter() - t0
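The Foo class and the pyperf harness are not shown in Example #1. A minimal sketch of how a time function like test_calls() is typically registered, assuming a hypothetical no-op Foo (the real class and benchmark name may differ):

# Sketch only: hypothetical Foo plus the usual pyperf wiring for a time
# function; assumes test_calls() above is importable (its xrange comes from
# a Python 2/3 compat shim).
import pyperf

class Foo(object):
    def foo(self, a, b, c, d):
        # Intentionally empty: the benchmark measures call overhead only.
        pass

if __name__ == "__main__":
    runner = pyperf.Runner()
    # bench_time_func() calls test_calls(loops) and expects the elapsed
    # time back as a float, matching test_calls()'s return value.
    runner.bench_time_func("call_method", test_calls)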
Example #2
def test_calls(loops):
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # 20 calls
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)

    return pyperf.perf_counter() - t0
def _bench_thrift(loops=1000):
    """Measure using a thrift-generated library N times.

    The target is a simple addressbook.  We measure the following:

    * create an addressbook with 1 person in it
    * serialize it
    * deserialize it into a new addressbook

    For each iteration we repeat this 100 times.
    """
    # proto_factory = TBinaryProtocolFactory()
    proto_factory = TBinaryProtocolAcceleratedFactory()

    elapsed = 0
    times = []
    for _ in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long the Addressbook ops take.
        t0 = pyperf.perf_counter()
        for _ in range(100):
            # First, create the addressbook.
            ab = make_addressbook()
            # Then, round-trip through serialization.
            encoded = serialize(ab, proto_factory)
            ab2 = ttypes.AddressBook()
            deserialize(ab2, encoded, proto_factory)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
    times.append(pyperf.perf_counter())
    return elapsed, times
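The make_addressbook() helper and the thrift imports are assumed by _bench_thrift(). A sketch of what they might look like, with illustrative field names (the real addressbook.thrift IDL and its generated ttypes may differ):

# Sketch only: assumed imports and make_addressbook() helper.
from thrift.TSerialization import serialize, deserialize
from thrift.protocol.TBinaryProtocol import (
    TBinaryProtocolFactory,
    TBinaryProtocolAcceleratedFactory,
)
from addressbook import ttypes  # hypothetical module generated by thrift

def make_addressbook():
    # One person, matching the docstring's "addressbook with 1 person in it".
    person = ttypes.Person(name="John Doe")     # illustrative field names
    return ttypes.AddressBook(people=[person])  # illustrative field names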
def find_factory_time_func(loops, ext):
    begin = perf_counter()
    for _ in range(loops):
        for _ in range(INNER_LOOPS):
            default_externalized_object_factory_finder(ext)()
    end = perf_counter()
    return end - begin
Example #5
def bench_wait_func_ready(loops=1000):
    """Measure waiting for N noop watch targets to become ready."""
    watched_objects = [NoopWatchTarget() for _ in range(loops)]

    t0 = pyperf.perf_counter()
    gevent.hub.wait(watched_objects)
    return pyperf.perf_counter() - t0
Example #6
def bench_formatted_output(loops, logger, stream):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    fmt = FORMAT
    msg = MESSAGE
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)
        logger.warning(fmt, msg)

    dt = pyperf.perf_counter() - t0

    lines = stream.getvalue().splitlines()
    if len(lines) != loops * 10:
        raise ValueError("wrong number of lines")

    return dt
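FORMAT, MESSAGE, truncate_stream() and the logger/stream pair are fixtures from the surrounding benchmark. A sketch of a compatible setup using only the standard library (values are illustrative, not the benchmark's exact ones):

# Sketch of the assumed fixtures for the logging benchmarks above and below.
import io
import logging

FORMAT = 'important: %s'
MESSAGE = 'some important information to be logged'

def truncate_stream(stream):
    # Reset the in-memory stream between runs so the line-count check holds.
    stream.seek(0)
    stream.truncate()

stream = io.StringIO()
handler = logging.StreamHandler(stream)
logger = logging.getLogger('bench_logger')
logger.propagate = False
logger.addHandler(handler)
# WARNING level: warning() calls are emitted (one line each), while the
# debug() calls in the "silent" benchmark produce no output at all.
logger.setLevel(logging.WARNING)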
def _bench_kinto(loops=5000, legacy=False):
    if legacy:
        print(requests.get("http://localhost:8000/v1").text)
        # print(requests.put("http://localhost:8000/v1/accounts/testuser", json={"data": {"password": "******"}}).text)

    start = pyperf.perf_counter()
    elapsed = 0
    times = []
    for i in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long a request takes.
        t0 = pyperf.perf_counter()
        # requests.get("http://localhost:8000/v1/").text
        urllib.request.urlopen("http://localhost:8000/v1/").read()
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.3freq/s)" % (total, loops / total))
    return elapsed, times
def _bench_mypy(loops=20, *, legacy=False):
    """Meansure running mypy on a file N times.

    The target file is large (over 2300 lines) with extensive use
    of type hints.

    Note that mypy's main() is called directly, which means
    the measurement includes the time it takes to read the file
    from disk.  Also, all output is discarded (sent to /dev/null).
    """
    elapsed = 0
    times = []
    with open(os.devnull, "w") as devnull:
        for i in range(loops):
            if legacy:
                print(i)
            # This is a macro benchmark for a Python implementation
            # so "elapsed" covers more than just how long main() takes.
            t0 = pyperf.perf_counter()
            try:
                main(None, devnull, devnull, TARGETS, clean_exit=True)
            except SystemExit:
                pass
            t1 = pyperf.perf_counter()

            elapsed += t1 - t0
            times.append(t0)
        times.append(pyperf.perf_counter())
    return elapsed, times
Example #9
def test_calls(loops):
    a = Foo()
    b = Bar()
    c = Baz()
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # 18 calls
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)
        a.foo(b, c)
        b.foo(c, a)
        c.foo(a, b)

    return pyperf.perf_counter() - t0
Example #10
    def populate_not_equal(loops):
        # Because we will populate when we make,
        # capture memory now to be able to include that.
        client = makeOne()
        duration = 0

        for loop in range(loops):
            all_data = _make_data(random_data, KEY_GROUP_SIZE)

            begin = perf_counter()
            for oid, (state, tid) in all_data:
                # install a copy that's not quite equal.
                # This should require saving it.
                # Note that we must use a different TID, or we
                # get CacheConsistencyError.
                state = state + str(loop).encode('ascii')
                tid = tid + loop + 1
                key = (oid, tid)
                new_v = (state, tid)
                client[key] = new_v
            duration += perf_counter() - begin
            all_data = None
            report(client, duration, extra=" (Loop " + str(loop) + ") ")

        report(client, duration, extra=" (Final ) ")
        return duration
Example #11
    def mixed(loops, client_and_keys=None):
        if client_and_keys is None:
            key_groups = _make_key_groups(KEY_GROUP_SIZE)
            client = makeOne(populate=True)
        else:
            client, key_groups = client_and_keys

        hot_keys = key_groups[0]

        duration = 0
        i = 0
        miss_count = 0

        for _ in range(loops):
            all_data = _make_data(random_data, KEY_GROUP_SIZE)

            begin = perf_counter()
            for oid, (state, tid) in all_data:
                i += 1
                key = (oid, tid)
                client[key] = (state, tid)
                if i == len(hot_keys):
                    for hot_oid in hot_keys:
                        hot_key = (hot_oid, hot_oid)
                        res = client[hot_key]
                        if not res:
                            miss_count += 1
                    i = 0
            duration += perf_counter() - begin
            all_data = None

        report(client, duration, extra="(Final)")
        return duration
Example #12
    def read(loops):
        # This is basically the worst-case scenario for a basic
        # segmented LRU: A repeating sequential scan, where no new
        # keys are added and all existing keys fit in the two parts of the
        # cache. Thus, entries just keep bouncing back and forth between
        # probation and protected. It so happens that this is our slowest
        # case.
        client = makeOne(populate=True)
        key_groups = _make_key_groups(KEY_GROUP_SIZE)
        begin = perf_counter()
        duration = 0

        for _ in range(loops):
            client = makeOne(populate=True)
            begin = perf_counter()
            for keys in key_groups:
                for oid in keys:
                    tid = oid
                    key = (oid, tid)
                    res = client[key]
                    #assert len(res) == len(keys)
                    if not res:
                        continue
                    assert res[0] == random_data
            duration += (perf_counter() - begin)
        print("Hit ratio: ", client.stats()['ratio'])

        key_groups = None

        report(client, duration, extra=" (Final ) ")
        return duration
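The comment in Example #12 describes a two-segment (probation/protected) LRU. A toy sketch of that structure, unrelated to the benchmark's actual cache implementation, shows why a repeating sequential scan keeps shuffling entries between the two segments:

# Toy illustration only; not the cache used by the benchmark.
from collections import OrderedDict

class ToySegmentedLRU(object):
    """New keys enter probation; a hit in probation promotes the key to
    protected; protected overflow demotes the oldest entry back to
    probation.  A repeated sequential scan over keys that only just fit
    across both segments therefore promotes or demotes on almost every
    access, which is the worst case described above."""

    def __init__(self, probation_size, protected_size):
        self.probation = OrderedDict()
        self.protected = OrderedDict()
        self.probation_size = probation_size
        self.protected_size = protected_size

    def __setitem__(self, key, value):
        self.probation[key] = value
        while len(self.probation) > self.probation_size:
            self.probation.popitem(last=False)   # evict oldest probationary entry

    def __getitem__(self, key):
        if key in self.protected:
            self.protected.move_to_end(key)
            return self.protected[key]
        value = self.probation.pop(key)          # hit in probation: promote
        self.protected[key] = value
        while len(self.protected) > self.protected_size:
            old_key, old_value = self.protected.popitem(last=False)
            self.probation[old_key] = old_value  # demote into probation
        return value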
Example #13
    def test(self, loops):

        s = u''.join(map(unichr, range(20)))
        t = u''.join(map(unichr, range(100)))
        u = u''.join(map(unichr, range(500)))
        v = u''.join(map(unichr, range(1000)))
        range_it = xrange(loops)
        t0 = pyperf.perf_counter()

        for _ in range_it:

            s.lower()
            s.lower()
            s.lower()
            s.lower()
            s.lower()

            s.upper()
            s.upper()
            s.upper()
            s.upper()
            s.upper()

            s.title()
            s.title()
            s.title()
            s.title()
            s.title()

            t.lower()
            t.lower()
            t.lower()
            t.lower()

            t.upper()
            t.upper()
            t.upper()
            t.upper()

            t.title()
            t.title()
            t.title()
            t.title()

            u.lower()
            u.lower()

            u.upper()
            u.upper()

            u.title()
            u.title()

            v.lower()

            v.upper()

            v.title()

        return pyperf.perf_counter() - t0
Example #14
def _bench_json_loads(loops=400):
    """Measure running json.loads() N times.

    The target data is nearly 1100 JSON objects, each on a single line,
    from a file.  The objects:
    
    * are all flat (no compound values)
    * vary a little in number of properties, though none are big
    * have a mix of values, varying in both type and size

    Only the json.loads() calls are measured.  The following are not:

    * reading the text from the file
    * looping through the lines
    """
    with open(TARGET) as f:
        s = f.read()
    lines = s.splitlines()

    elapsed = 0
    times = []
    for _ in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long json.loads() takes.
        t0 = pyperf.perf_counter()
        for text in lines:
            if not text:
                continue
            json.loads(text)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
    times.append(pyperf.perf_counter())
    return elapsed, times
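As a concrete illustration of the kind of input described in the docstring (one flat JSON object per line; the real data file is larger and more varied):

# Illustrative only; the benchmark's TARGET file contains ~1100 such lines.
import json

sample_line = '{"id": 123, "name": "example", "active": true, "score": 4.5}'
obj = json.loads(sample_line)
assert obj["id"] == 123 and obj["active"] is True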
Example #15
def bench_pickle(loops, pickle, options):
    range_it = range(loops)

    # micro-optimization: use fast local variables
    dumps = pickle.dumps
    objs = (DICT, TUPLE, DICT_GROUP)
    protocol = options.protocol
    t0 = pyperf.perf_counter()

    for _ in range_it:
        for obj in objs:
            # 20 dumps
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)
            dumps(obj, protocol)

    return pyperf.perf_counter() - t0
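DICT, TUPLE, DICT_GROUP and the options object come from the surrounding benchmark. A rough stand-in for trying bench_pickle() directly (the real payloads are larger and more varied):

# Illustrative stand-ins only.
import pickle

DICT = {str(i): i for i in range(100)}
TUPLE = tuple(range(100))
DICT_GROUP = [dict(DICT, group=i) for i in range(3)]

class _Options(object):
    protocol = pickle.HIGHEST_PROTOCOL

dt = bench_pickle(1000, pickle, _Options())
print("%.3f s for 1000 loops" % dt)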
Example #16
def bench_pyflake(loops, filename):
    input_fp = open(filename, 'rb')
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        input_fp.seek(0)
        field = RBitfield(input_fp)

        magic = field.readbits(16)
        if magic == 0x1f8b:  # GZip
            out = gzip_main(field)
        elif magic == 0x425a:  # BZip2
            out = bzip2_main(field)
        else:
            raise Exception("Unknown file magic %x, not a gzip/bzip2 file" %
                            hex(magic))

    dt = pyperf.perf_counter() - t0
    input_fp.close()

    if hashlib.md5(out).hexdigest() != "afa004a630fe072901b1d9628b960974":
        raise Exception("MD5 checksum mismatch")

    return dt
Example #17
def bench_silent(loops, logger, stream):
    truncate_stream(stream)

    # micro-optimization: use fast local variables
    m = MESSAGE
    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # repeat 10 times
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)
        logger.debug(m)

    dt = pyperf.perf_counter() - t0

    if len(stream.getvalue()) != 0:
        raise ValueError("stream is expected to be empty")

    return dt
def bench_etree(iterations, etree, bench_func):
    xml_root = build_xml_tree(etree)
    xml_data = etree.tostring(xml_root)

    # not using NamedTemporaryFile() here as re-opening it is not portable
    tf, file_path = tempfile.mkstemp()
    try:
        etree.ElementTree(xml_root).write(file_path)

        t0 = pyperf.perf_counter()

        for _ in range(iterations):
            bench_func(etree, file_path, xml_data, xml_root)

        dt = pyperf.perf_counter() - t0
    finally:
        try:
            os.close(tf)
        except EnvironmentError:
            pass
        try:
            os.unlink(file_path)
        except EnvironmentError:
            pass

    return dt
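bench_etree() receives one of several bench_func callables chosen elsewhere in the benchmark. A minimal sketch of one such callable (name and checks are illustrative), which re-parses the document from disk and from the serialized bytes:

def bench_parse(etree, file_path, xml_data, xml_root):
    # Illustrative bench_func: parse the file and the in-memory bytes.
    tree = etree.parse(file_path)
    root = etree.fromstring(xml_data)
    # Touch the results so the work cannot be optimized away.
    assert tree.getroot().tag == root.tag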
def _bench_flask_requests(loops=1800, legacy=False):
    """Measure N HTTP requests to a local server.

    Note that the server is freshly started here.

    Only the time for requests is measured here.  The following are not:

    * preparing the site the server will serve
    * starting the server
    * stopping the server

    Hence this should be used with bench_time_func()
    instead of bench_func().
    """
    start = pyperf.perf_counter()
    elapsed = 0
    times = []

    requests_get = requests.get
    for i in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long a request takes.
        t0 = pyperf.perf_counter()
        requests_get("http://localhost:8000/blog/").text
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.2f ms / %.3freq/s)" % (total, total / loops * 1e3, loops / total))
    return elapsed, times
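Because _bench_flask_requests() returns (elapsed, times) rather than a single float, the bench_time_func() usage mentioned in the docstring needs a small adapter. A sketch (the benchmark name is illustrative):

import pyperf

def time_func(loops):
    # bench_time_func() expects the total elapsed time as a float,
    # so drop the per-request timestamps here.
    elapsed, _ = _bench_flask_requests(loops)
    return elapsed

if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.bench_time_func("flask_requests", time_func)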
Example #20
def _bench_iface_call_simple(loops, inst):
    t0 = pyperf.perf_counter()
    for _ in range(loops):
        for _ in range(INNER):
            for iface in ifaces:
                iface(inst)
    return pyperf.perf_counter() - t0
def to_external_object_time_func(loops, obj):
    begin = perf_counter()
    for _ in range(loops):
        for _ in range(INNER_LOOPS):
            toExternalObject(obj)
    end = perf_counter()
    return end - begin
Example #22
def bench_in(loops, o):
    t0 = pyperf.perf_counter()
    for _ in range(loops):
        for _ in range(INNER):
            o.__contains__(Interface)

    return pyperf.perf_counter() - t0
Example #23
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        cur = Person.delete()
        cur.execute()

        cur = Address.delete()
        cur.execute()

        # Run the benchmark once
        t0 = pyperf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person.insert()
            new_person.execute(name="name %i" % i)

            # Insert an Address in the address table
            new_address = Address.insert()
            new_address.execute(post_code='%05i' % i)

        # run 'npeople' SELECT queries after the inserts
        for i in xrange(npeople):
            cur = Person.select()
            cur.execute()

        total_dt += (pyperf.perf_counter() - t0)

    return total_dt
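Person and Address in Example #23 are SQLAlchemy Core Table objects used through the legacy implicit-execution API (Table.insert()/.delete()/.select() followed by .execute() with no explicit connection). A sketch of a compatible setup, with illustrative column names:

# Sketch only; relies on the legacy bound-MetaData API (removed in
# SQLAlchemy 2.0), which is what lets Person.insert().execute(...) work.
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

engine = create_engine('sqlite://')
metadata = MetaData(bind=engine)

Person = Table('person', metadata,
               Column('id', Integer, primary_key=True),
               Column('name', String(250)))

Address = Table('address', metadata,
                Column('id', Integer, primary_key=True),
                Column('post_code', String(250)),
                Column('person_id', Integer))

metadata.create_all()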
Example #24
def bench_regex_generic(loops):
    if bench_regex_generic.data is None:
        bench_regex_generic.data = init_benchmarks()
    data = bench_regex_generic.data

    range_it = range(loops)
    search = re.search
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # Runs all of the benchmarks for a given value of n.
        for regex, string in data:
            # search 10 times
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])
            search(regex, string[0])

    return pyperf.perf_counter() - t0
def _bench_pycparser(loops=20):
    """Measure running pycparser on several large C files N times.

    The files are all relatively large, from well-known projects.
    Each is already preprocessed.

    Only the CParser.parse() calls are measured.  The following are not:

    * finding the target files
    * reading them from disk
    * creating the CParser object
    """
    files = list(_iter_files())

    elapsed = 0
    times = []
    for _ in range(loops):
        times.append(pyperf.perf_counter())
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long parser.parse() takes.
        t0 = pyperf.perf_counter()
        parse_files(files)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
    times.append(pyperf.perf_counter())
    return elapsed, times
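_iter_files() and parse_files() are helpers from the benchmark. A minimal sketch of parse_files() using pycparser's CParser, which is what the docstring says is being measured (the real helper's structure may differ):

from pycparser.c_parser import CParser

def parse_files(files):
    # Assumes `files` is a list of (filename, preprocessed_source) pairs.
    for filename, text in files:
        parser = CParser()
        ast = parser.parse(text, filename)
        assert ast is not None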
def bench_sqlalchemy(loops, npeople):
    total_dt = 0.0

    for _ in xrange(loops):
        # drop rows created by the previous benchmark
        session.query(Person).delete(synchronize_session=False)
        session.query(Address).delete(synchronize_session=False)

        # Run the benchmark once
        t0 = pyperf.perf_counter()

        for i in xrange(npeople):
            # Insert a Person in the person table
            new_person = Person(name="name %i" % i)
            session.add(new_person)
            session.commit()

            # Insert an Address in the address table
            new_address = Address(post_code='%05i' % i, person=new_person)
            session.add(new_address)
            session.commit()

        # run 'npeople' queries after the inserts
        for i in xrange(npeople):
            session.query(Person).all()

        total_dt += (pyperf.perf_counter() - t0)

    return total_dt
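This variant uses the SQLAlchemy ORM: Person and Address are declarative models and session is an ORM Session, all assumed from the surrounding benchmark. A sketch of a compatible setup (column names are illustrative):

from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String(250))

class Address(Base):
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    post_code = Column(String(250))
    person_id = Column(Integer, ForeignKey('person.id'))
    person = relationship(Person)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()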
Example #27
def main(loops, level):
    board, solution = LEVELS[level]
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    stream = io.StringIO()

    board = board.strip()
    expected = solution.rstrip()

    range_it = range(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        stream = io.StringIO()
        solve_file(board, strategy, order, stream)
        output = stream.getvalue()
        stream = None

    dt = pyperf.perf_counter() - t0

    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s" %
                             (output, expected))

    return dt
def setup(rootdir):
    """
    Set up a djangocms installation.
    Runs the initial bootstrapping without the db migration,
    so that we can turn off sqlite synchronous and avoid fs time.
    Rough testing shows that setting synchronous=OFF gives basically
    the same performance as running on /dev/shm.
    """
    sitedir = os.path.join(rootdir, SITE_NAME)  # This is where Django puts it.

    # Delete the site dir if it already exists.
    if os.path.exists(sitedir):
        shutil.rmtree(sitedir, ignore_errors=False)

    # First, create the site.
    subprocess.check_call(ARGV_CREATE, cwd=rootdir)

    # Add customizations.
    settingsfile = os.path.join(sitedir, SITE_NAME, "settings.py")
    with open(settingsfile, "a") as f:
        f.write(SETTINGS)

    # Finalize the site.
    t0 = pyperf.perf_counter()
    subprocess.check_call(ARGV_MIGRATE, cwd=sitedir)
    elapsed = pyperf.perf_counter() - t0

    return sitedir, elapsed
Example #29
    def test(self, loops):

        error = ValueError
        be = self.BlockExceptions()
        range_it = xrange(loops)
        t0 = pyperf.perf_counter()

        for _ in range_it:
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")
            with be:
                raise error("something")

        return pyperf.perf_counter() - t0
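self.BlockExceptions is defined elsewhere on the test class; the shape implied by the loop above is a context manager that swallows the raised exception, roughly:

class BlockExceptions(object):
    # Minimal sketch: absorb any exception raised inside the `with` block,
    # so the loop above measures raise/catch overhead through __exit__.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return True  # returning True suppresses the exception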
Example #30
def bench_pathlib(loops, tmp_path):
    base_path = pathlib.Path(tmp_path)

    # Warm up the filesystem cache and keep some objects in memory.
    path_objects = list(base_path.iterdir())
    # FIXME: does this code really cache anything?
    for p in path_objects:
        p.stat()
    assert len(path_objects) == NUM_FILES, len(path_objects)

    range_it = xrange(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        # Do something simple with each path.
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()
        for p in base_path.iterdir():
            p.stat()
        for p in base_path.glob("*.py"):
            p.stat()

    return pyperf.perf_counter() - t0