def testCluster():
    sc = cluster_manager.ClusterManager("tt", [
        "hq.lufex.com:5000 1",
    ], logger=logger.screen_logger())
    clustercall = task.TaskCreator(sc, logger.screen_logger())
    s = clustercall.Server("/websocket/echo", "Hello WS Cluster", "ws", auth=("app", "1111"), mapreduce=False)
    threading.Thread(target=__reduce, args=(s,)).start()
    # pump the asyncore event loop until only one socket remains in the map
    while 1:
        asyncore.loop(timeout=1, count=2)
        if len(asyncore.socket_map) == 1:
            break

def testSocketPool():
    sc = socketpool.SocketPool(logger.screen_logger())
    clustercall = task.TaskCreator(sc, logger.screen_logger())
    s = clustercall.Server("http://hq.lufex.com:5000/websocket/echo", "Hello WS Sock Pool", "ws", auth=("app", "1111"))
    #s = clustercall.Server("http://210.116.122.187:3424/rpc2", "admin/whddlgkr")
    #s.bladese.util.status("openfos.v2")
    threading.Thread(target=__reduce, args=(s,)).start()
    while 1:
        asyncore.loop(timeout=1, count=2)
        print(asyncore.socket_map)
        if len(asyncore.socket_map) == 1:
            break

def make_logger(self, prefix, freq="daily"):
    self.lock.acquire()
    has_prefix = prefix in self.logger_factory
    if has_prefix:
        self.lock.release()
        raise TypeError("%s is already used" % prefix)
    _logger = logger.multi_logger()
    if self.path and 'file' in self.media:
        _logger.add_logger(logger.rotate_logger(self.path, prefix, freq))
    if 'screen' in self.media:
        _logger.add_logger(logger.screen_logger())
    self.logger_factory[prefix] = _logger
    self.lock.release()

def test_adns():
    adns.init(logger.screen_logger(), [], "tcp")
    adns.query("www.microsoft.com", "A", callback1)
    loop()
    adns.query("www.cnn.comx", "A", callback2)
    adns.query("www.cnn.comx", "A", callback2)
    loop()
    adns.query("www.cnn.comx", "A", callback2)
    loop()
    # 3 strike error!
    adns.query("www.cnn.comx", "A", callback3)
    loop()
    ans = adns.get("www.cnn.comx", "A")
    assert ans[0]["error"] == "too many error"

def test_adns():
    dns.create_pool([], logger.screen_logger())
    for p in ("udp", "tcp"):
        dns.query("www.microsoft.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.cnn.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.gitlab.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.alexa.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.yahoo.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.github.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.google.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.amazon.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.almec.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.alamobeauty.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.alphaworld.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.allrightsales.com", protocol=p, callback=_print, qtype="a")
        dns.query("www.glasteel.com", protocol=p, callback=_print, qtype="a")
    loop()

except IndexError:
    usage()
    sys.exit()

for k, v in argopt:
    if k == "-c":
        clients = int(v)
    elif k == "-r":
        req = int(v)
    elif k == "-k":
        use_keep_alive = True
    elif k == "--help":
        usage()
        sys.exit()

requests.configure(logger.screen_logger(), clients, 10,
                   default_option="--http-connection " + (use_keep_alive and "keep-alive" or "close"))
for i in range(clients):
    requests.add(url, handle_response)
t = timer()
requests.get_all()
total_time = t.end()
print(('\n%d clients\n%d hits/client\n'
       'total hits:%d\n'
       'total errors:%d\n%.3f seconds\ntotal hits/sec:%.3f' %

             callback=test_callback, qtype="a")

def _print(ans):
    if ans:
        print(ans[0]['name'], ans[-1]['data'])
    else:
        print("FAILED")

if __name__ == "__main__":
    from rs4 import logger
    import pprint

    create_pool(PUBLIC_DNS_SERVERS, logger.screen_logger())
    for i in range(4):
        #query ("www.microsoft.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.cnn.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.gitlab.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.alexa.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.yahoo.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.github.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.google.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.amazon.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.almec.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.alamobeauty.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.alphaworld.com", protocol = "udp", callback = _print, qtype="a")
        #query ("www.allrightsales.com", protocol = "udp", callback = _print, qtype="a")
        query("www.glasteel.com", protocol="udp", callback=_print, qtype="a")

from __future__ import print_function
from aquests.protocols.http import requests
from rs4 import logger
import time

def handle_response (rc):
    global total_sessions, clients, req, total_errors, resp_codes
    print (rc.response.code)

requests.configure (
    logger.screen_logger (), 2, 10,
    default_option = "--http-connection keep-alive"
)
requests.add ("http://*****:*****@hq.lufex.com:5000/hello", handle_response)
requests.get_all ()

def get(server, dbname, auth, dbtype):
    return pool.get(server, dbname, auth, dbtype)

def cleanup():
    pool.cleanup()

if __name__ == "__main__":
    from skitai import lifetime
    from rs4 import logger
    from aquests.server.threads import trigger

    trigger.start_trigger()
    pool = DBPool(logger.screen_logger())

    def query():
        conn = pool.get("mydb.us-east-1.rds.amazonaws.com:5432", "mydb", "postgres", "")
        conn.execute("SELECT * FROM cities;")
        rs = conn.fetchwait(5)
        print(rs.status, rs.result)
        conn.execute(
            "INSERT INTO weather VALUES ('San Francisco', 46, 50, 0.25, '1994-11-27');"
        )
        rs = conn.wait(5)
        print(rs.status, rs.result)
        conn.execute(

def __init__(self, workers=N_CPU, zombie_timeout=None, logger=None):
    self.logger = logger or screen_logger()
    self.executors = [
        ThreadExecutor(workers, zombie_timeout, self.logger),
        ProcessExecutor(workers, zombie_timeout, self.logger)
    ]

def add_screen_logger(self):
    for prefix, _logger in list(self.logger_factory.items()):
        _logger.add_logger(logger.screen_logger())

def configure (
    workers = 1,
    logger = None,
    callback = None,
    timeout = 10,
    cookie = False,
    force_http1 = False,
    http2_constreams = 1,
    allow_redirects = True,
    qrandom = False,
    use_pool = True,
    tracking = False,
    backend = False,
    dns = []
):
    global _logger, _cb_gateway, _concurrent, _initialized, _timeout
    global _workers, _que, _allow_redirects, _force_h1

    if logger is None:
        logger = logger_f.screen_logger ()
    _logger = logger

    if qrandom:
        _que = queue.RandomQueue ()
    else:
        _que = queue.Queue ()

    _allow_redirects = allow_redirects
    _force_h1 = request_handler.RequestHandler.FORCE_HTTP_11 = force_http1
    if not use_pool:
        asynconnect.AsynConnect.keep_connect = use_pool
        asynconnect.AsynSSLConnect.keep_connect = use_pool
    if not _force_h1:
        asynconnect.AsynConnect.fifo_class = await_fifo
        asynconnect.AsynSSLConnect.fifo_class = await_fifo

    http2.MAX_HTTP2_CONCURRENT_STREAMS = http2_constreams
    _workers = workers
    _concurrent = workers
    if not force_http1:
        _concurrent = workers * http2_constreams
    elif http2_constreams:
        pass
        #_logger ("parameter http2_constreams is ignored", "warn")

    if callback:
        _cb_gateway = callback
    if cookie:
        ls.create (_logger)

    _timeout = timeout
    client.set_timeout (timeout)
    dbapi.set_timeout (timeout)

    socketpool.create (_logger, backend = backend, use_pool = use_pool)
    dbpool.create (_logger, backend = backend)
    adns.init (_logger, dns)
    lifetime.init (_timeout / 2., logger) # maintenance interval
    if tracking:
        lifetime.enable_memory_track ()
    _initialized = True

def app():
    app_ = Atila(__name__)
    app_.logger = logger.screen_logger()
    return app_