def test_managed ():
        # Exercise the three managed-socket layers one after another.
        # After each phase the async loop is run to completion and any
        # pending finalizations are collected before the next phase
        # starts, so the phases cannot interfere with each other.
        for run_phase in (
                lambda: test_connections (connections, hosts),
                lambda: test_cache (cache, hosts),
                lambda: test_pool (pool)
                ):
                run_phase ()
                async_loop.dispatch ()
                finalization.collect ()
        # (scrape residue: orphaned tail of a truncated call, kept for reference)
        # ' | Copyleft GPL 2.0',
        # 'info'
        # )
    # NOTE(review): this fragment is the tail of a larger entry point --
    # `root`, `precision`, `inactive`, `local`, `private`, `public`,
    # `finalize` and `started` are bound outside this view; confirm
    # against the full script.
    import glob, os, stat
    # One shared finalization joined to every listener, so `finalize`
    # fires only once all listeners have been finalized.
    joined = finalization.Finalization ()
    # One decorated PRESTo listener per subdirectory of `root`; the
    # directory's basename is used as the address (port 80).
    for listen in [
        tcp_server.decorate (presto.Listen (
            path, addr, precision
            ), inactive, local, private, public)
        for path, addr in [
            (
                    p.replace ('\\', '/'), 
                    ip_peer.addr (os.path.basename (p), 80)
                    )
            for p in glob.glob ('%s/*' % root)
            if stat.S_ISDIR (os.stat (p)[0])
            ]
        ]:
        # Load every module found in the listener's PRESTo directory.
        for filename in listen.presto_dir ():
                listen.presto_load (filename)
        async_loop.catch (listen.server_shutdown)
        listen.finalization = joined
    joined.finalization = finalize
    loginfo.log ('loaded in %f seconds' % (time.time () - started), 'info')

import sys
from allegra import async_loop, finalization, anoption

# Apply the command line options to main, drive the async loop until
# every dispatcher has closed, then require that no finalizations are
# left pending before exiting.
anoption.cli (main, sys.argv[1:])
async_loop.dispatch ()
assert None == finalization.collect ()
sys.exit (0)
# Fixed: removed an unreachable async_loop.dispatch () that followed
# sys.exit (0) -- SystemExit terminates the script before that line.


# Adding Features

from allegra import finalization, async_limits, synchronized

# Fetch http://66.249.91.99/ with one async dispatcher, spooling the
# response body to "response.txt"; the dispatcher logs its input byte
# count when it is finalized.
dispatcher = async_chat.Dispatcher()
if async_client.connect(dispatcher, ("66.249.91.99", 80), 3):
    dispatcher.async_chat_push("GET / HTTP/1.1\r\n" "Host: 66.249.91.99\r\n" "Connection: keep-alive\r\n" "\r\n")
    collector.bind(dispatcher, synchronized.File_collector("response.txt"))
    # assumes args are (timeout, precision, allowed-bytes callable)
    # -- confirm against allegra.async_limits.limit_recv
    async_limits.limit_recv(dispatcher, 3, 1, (lambda: 1 << 13))
    dispatcher.finalization = lambda finalized: finalized.log('bytes in="%d"' % finalized.ac_in_meter)
    # Drop our reference so the dispatcher can be finalized once closed.
    # NOTE(review): on connect failure the name stays bound until it is
    # rebound below -- the reference is only released on this branch.
    del dispatcher
async_loop.dispatch()
finalization.collect()


# Decouple A Lot

# Same kind of fetch, with the setup statements reordered: limits,
# finalization and collector are all bound before connecting.
dispatcher = async_chat.Dispatcher()
# assumes args are (timeout, precision, allowed-bytes callable)
# -- confirm against allegra.async_limits.limit_recv
async_limits.limit_recv(dispatcher, 3, 1, (lambda: 1 << 13))
dispatcher.finalization = lambda finalized: finalized.log('bytes in="%d"' % finalized.ac_in_meter)
collector.bind(dispatcher, synchronized.File_collector("response.txt"))
# NOTE(review): the Host header names www.google.com while the socket
# connects to 66.249.91.99 -- presumably a frontend IP for that host;
# verify before reuse.
dispatcher.async_chat_push(
    "GET /intl/zh-CN/ HTTP/1.1\r\n" "Host: www.google.com\r\n" "Connection: keep-alive\r\n" "\r\n"
)
async_client.connect(dispatcher, ("66.249.91.99", 80), 3)
# Drop our reference so the dispatcher can be finalized once closed.
del dispatcher
async_loop.dispatch()
finalization.collect()
# Example #4
# (scraped example separator; "0" below it was a vote count)
        def server_close(self, dispatcher):
            # Let the base Listen class do its bookkeeping first, close
            # the per-connection log file, then publish the finished
            # file by linking it from tmp/ into new/.
            async_server.Listen.server_close(self, dispatcher)
            dispatcher.anetlog_file.close()
            name = dispatcher.anetlog_name
            source = '/'.join((root, 'tmp', name))
            target = '/'.join((root, 'new', name))
            _link(source, target)

    # Start the netlog server and register its server_shutdown with the
    # async loop's catch hook -- presumably invoked when the loop is
    # interrupted or exits; confirm against allegra.async_loop.catch.
    async_loop.catch(
        Listen(Dispatcher, (host, port), precision, 5).server_shutdown)


if __name__ == '__main__':
    import sys

    from allegra import anoption, async_loop, finalization

    # Apply the command line options to main, run the async loop to
    # completion, then check that nothing is left to finalize.
    anoption.cli(main, sys.argv[1:])
    async_loop.dispatch()
    assert None == finalization.collect()

# Note about this implementation
#
# This is a very simple netlog server that accepts any connection,
# dump anything received in a new file, then close and link it from
# tmp/ into new/ when the socket is closed.
#
# Note that in debug mode it also prints out everything to STDOUT.
#
# It may block once in a while when writing to the file under high-load,
# but the purpose here is not to respond to requests asap, it's an end point
# of contention ... out of its logging client, distributed on the network.
# Blocking at that point has little effect on the logging applications:
# the output queue of their netlog dispatcher will simply have to grow.
#
# Example #5
# (scraped example separator; "0" below it was a vote count)

try:
    # Open `clients` concurrent connections; each one that connects
    # replays the next serialized (statement, params) pair, counting
    # statements against the class-wide limit.
    for ansql in (Dispatcher() for i in xrange(clients)):
        if tcp_client.connect(ansql, addr):
            stmt, param = loads(statements.next())
            Dispatcher.stmt_count += 1
            if Dispatcher.stmt_count < Dispatcher.stmt_limit:
                # NOTE(review): `sql` is a free name here -- it looks
                # like it should be the freshly created `ansql`
                # dispatcher; confirm against the full script.
                sql(sql.test_callback, stmt, param)
except Exception:
    # Fixed: narrowed from a bare `except:` so SystemExit and
    # KeyboardInterrupt still propagate; anything else is logged and
    # aborts the driver with a non-zero status.
    loginfo.traceback()
    sys.exit(2)

# Time the whole run: dispatch until every connection has closed,
# collect the remaining finalizations, then report the throughput.
started = time.time()
async_loop.dispatch()
finalization.collect()
elapsed = time.time() - started
loginfo.log(
    '%d statements in %f seconds' % (Dispatcher.stmt_count, elapsed),
    'info')
sys.exit(0)

# Here's the idea: open with N client connections playing a sequence
# of statements over, but starting at random, stop when the given count
# of requests made is reached, synchronize the next request with the last
# response. This is a next-best simulation of N user agents banging the
# database access controller implemented by the SQL peer application.
#
# The question is how many SQL transactions like X, Y and Z can a single
# asynchronous database handle per second, preferably loaded with real data.
#
# That's how you develop a test driven application.