Example 1
def __init__(self, pool_size=4, task_size=100):
    super(EventBus, self).__init__()
    self.pool = ThreadPool(pool_size)        # worker threads for dispatching
    self.async_events = TaskPool(task_size)  # queue for asynchronous events
    self.event_handlers = dict()             # event name -> registered handlers
    self.con = Condition()
    self.init()
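The snippet above is only a constructor; for context, here is a minimal, hypothetical sketch of the class it could belong to. EventBus subclassing Thread is inferred from super(EventBus, self).__init__() plus the threading primitives, and TaskPool is stubbed because the real implementation is not shown:

from multiprocessing.pool import ThreadPool
from threading import Condition, Thread


class TaskPool(object):
    """Stand-in stub; the real project ships its own TaskPool."""

    def __init__(self, size):
        self.size = size


class EventBus(Thread):
    def __init__(self, pool_size=4, task_size=100):
        super(EventBus, self).__init__()
        self.pool = ThreadPool(pool_size)
        self.async_events = TaskPool(task_size)
        self.event_handlers = dict()
        self.con = Condition()
        self.init()

    def init(self):
        pass  # hypothetical hook; the real class presumably does setup here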
Example 2
def run_tests(tests, summary):
    jobs = OPTIONS.workercount
    # python 3.3 fixed a bug with concurrently writing .pyc files.
    # https://bugs.python.org/issue13146
    embedded_version = (subprocess.check_output([
        OPTIONS.gdb_executable,
        "--batch",
        "--ex",
        "python import sys; print(sys.hexversion)",
    ]).decode("ascii").strip())
    # Compare version numbers as integers; comparing hex() strings is
    # lexicographic and breaks for e.g. 0x10000000 vs 0x3030000.
    if int(embedded_version) < 0x3030000:
        jobs = 1

    pool = TaskPool(tests, job_limit=jobs, timeout=OPTIONS.timeout)
    pool.run_all()
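Both this example and Example 4 read settings from a module-level OPTIONS object that is not shown. A plausible reconstruction, assuming argparse (the flag names and defaults are guesses derived from the attributes the code reads):

import argparse

# Hypothetical OPTIONS wiring; only workercount, timeout and gdb_executable
# are actually referenced by the examples.
parser = argparse.ArgumentParser()
parser.add_argument("--workercount", type=int, default=4)
parser.add_argument("--timeout", type=int, default=120)
parser.add_argument("--gdb-executable", dest="gdb_executable", default="gdb")
OPTIONS = parser.parse_args()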
Example 3
    def __worker_prepare(self, taskpool=None):
        Worker.setWorkerConfig(
            rootlogger=LOGGER,
            basedomain=self.__config["--basedomain"] or None,
            errorpage=self.__config["--errorpage"] or None,
            taskpool=taskpool or TaskPool(
                distinct_filter_dump="filter_dump_{now}".format(now=NOW)))

        if not self.resume:
            Worker.Worker_taskpool.put(self.__config["<src>"])

        if self.__config["--debug"]:
            # map() is lazy under Python 3, so loop explicitly to make sure
            # every handler really gets registered.
            for dbg_log_file in self.__config["--dbglog"]:
                Worker.add_debuglog_handler(
                    getHandler(logging.FileHandler, filename=dbg_log_file))
            Worker.add_debuglog_handler(getHandler(logging.StreamHandler))

        if self.__config["--syslog"]:
            for sys_log_file in self.__config["--syslog"]:
                Worker.add_syslog_handler(
                    getHandler(hdlr_cls=logging.FileHandler,
                               filename=sys_log_file))
        elif not self.__config["--debug"]:
            Worker.add_syslog_handler(
                getHandler(hdlr_cls=logging.StreamHandler))

        if self.__config["--errlog"]:
            for err_log_file in self.__config["--errlog"]:
                Worker.add_errorlog_handler(
                    getHandler(hdlr_cls=logging.FileHandler,
                               filename=err_log_file))
        elif not self.__config["--debug"]:
            Worker.add_errorlog_handler(
                getHandler(hdlr_cls=logging.StreamHandler))
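This method leans on a getHandler helper that is not part of the snippet. A minimal sketch of what it presumably does, namely build a handler of the requested class and attach a shared formatter (the format string is an assumption):

import logging

LOG_FORMAT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'  # assumed


def getHandler(hdlr_cls=logging.StreamHandler, **kwargs):
    # Hypothetical reconstruction: instantiate the handler class with the
    # given keyword arguments and give it a common formatter.
    handler = hdlr_cls(**kwargs)
    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    return handler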
Example 4
def run_tests(tests, summary):
    pool = TaskPool(tests,
                    job_limit=OPTIONS.workercount,
                    timeout=OPTIONS.timeout)
    pool.run_all()
Example 5
import threading
import os
import json
from BaseHTTPServer import HTTPServer

from taskpool import TaskPool
from HTTPlistener import httpServerFactory

with open('taskpool_config', 'r') as f:
    master_config = json.load(f)


tp = TaskPool(master_config)
taskpool_thread = threading.Thread(target=tp.loop)
taskpool_thread.start()

ip = master_config['master address']
port = master_config['master port']
HTTPListener = httpServerFactory({'taskpool': tp})
serv = HTTPServer((ip, port), HTTPListener)
print 'Starting master http server at port %d ...' % port


try:
    serv.serve_forever()
except KeyboardInterrupt:
    tp.close_connection()
    print 'KeyboardInterrupt 8P'
    os._exit(0)
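The script expects a taskpool_config file containing JSON; only the 'master address' and 'master port' keys are read here, though TaskPool itself may require more. An illustrative way to produce one (the values are made up):

import json

config = {'master address': '127.0.0.1', 'master port': 8080}
with open('taskpool_config', 'w') as f:
    json.dump(config, f)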
Example 6
import logging
import random

# TaskPool and the cache helpers (read_cached, set_cache_path, delete_cache)
# come from the project's own modules and are not part of this snippet.


def open_test_random(key):
    # The original excerpt starts mid-function; these two def lines are
    # reconstructed from the inner function's name and the call sites below.
    def inner_open_test_random(inner_key):
        return 'content for key %s: %s' % (inner_key,
                                           random.randint(1, 100000))

    content = read_cached(inner_open_test_random, key)
    return content


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
    logging.getLogger('requests').setLevel(logging.WARNING)
    file_handler = logging.FileHandler('download-vessels-details.log',
                                       mode='w')
    formatter = logging.Formatter(
        '%(asctime)s:%(name)s:%(levelname)s:%(message)s')
    file_handler.setFormatter(formatter)
    logging.getLogger().addHandler(file_handler)

    set_cache_path('output/tests', max_node_files=400, rebalancing_limit=1000)
    delete_cache()

    tasks = TaskPool(30)

    for count in range(10000):
        tasks.add_task(open_test_random, count)

    results = tasks.execute()
    logging.info('results: %s', results)
    delete_cache()
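For reference, a minimal sketch of the add_task/execute-style TaskPool this last example uses (the constructor takes a worker count, tasks are queued and then run in bulk); this is an illustration built on multiprocessing.pool.ThreadPool, not the library's actual code:

from multiprocessing.pool import ThreadPool


class TaskPool(object):
    def __init__(self, workers):
        self._pool = ThreadPool(workers)
        self._tasks = []

    def add_task(self, fn, *args):
        # Queue the call; nothing runs until execute() is invoked.
        self._tasks.append((fn, args))

    def execute(self):
        results = [self._pool.apply_async(fn, args)
                   for fn, args in self._tasks]
        self._pool.close()
        self._pool.join()
        return [r.get() for r in results]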