示例#1
0
    def test_cortex_xact_deadlock(self):
        '''
        Regression test: concurrent writes to a sqlite cortex while
        reading it through a telepath proxy must not deadlock.
        '''
        N = 100
        prop = 'testform'
        fd = tempfile.NamedTemporaryFile()
        dmon = s_daemon.Daemon()
        pool = s_threads.Pool(size=4, maxsize=8)
        wait = s_eventbus.Waiter(pool, 1, 'pool:work:fini')

        with s_cortex.openurl('sqlite:///%s' % fd.name) as core:

            def populate():
                # hammer writes from a pool thread while the main
                # thread reads via the proxy below
                for i in range(N):
                    core.formTufoByProp(prop, str(i))

            dmon.share('core', core)
            link = dmon.listen('tcp://127.0.0.1:0/core')
            prox = s_telepath.openurl('tcp://127.0.0.1:%d/core' % link[1]['port'])

            pool.wrap(populate)()
            for _ in range(N):
                prox.getTufosByProp(prop)

            wait.wait()
            pool.fini()

            # clean up the proxy and daemon (the original leaked both)
            prox.fini()
            dmon.fini()
示例#2
0
    def test_threads_pool(self):
        '''
        A pooled call must invoke the callback with the supplied args.
        '''
        func = s_t_common.CallBack()
        with s_threads.Pool() as pool:

            pool.call(func, 20, 30)

            self.true(func.wait(timeout=1))
            # bug fix: the original *assigned* func.args = (20, 30)
            # instead of asserting the recorded call arguments
            self.eq(func.args, (20, 30))
示例#3
0
    def test_threads_pool(self):
        '''
        A task submitted to a pool should run and reach fini.
        '''
        def addem(x, y):
            return x + y

        with s_threads.Pool() as pool:
            # the task context manager queues the work unit
            with pool.task(addem, 20, 30) as task:
                pass

            self.true(task.waitfini(timeout=1))
示例#4
0
    def test_threads_pool_wrap(self):
        '''
        Calling a pool-wrapped function should execute it on a worker.
        '''
        fired = threading.Event()

        def addem(x, y):
            fired.set()
            return x + y

        with s_threads.Pool() as pool:
            pool.wrap(addem)(20, 30)
            # the worker thread sets the event when the call runs
            self.true(fired.wait(timeout=1))
示例#5
0
    def runBossPool(self, size, maxsize=None):
        '''
        Spin up a thread Pool() and assign it to this Boss().

        Args:
            size: initial number of worker threads
            maxsize: optional maximum number of worker threads

        Example:

            boss.runBossPool(3)

        '''
        workers = s_threads.Pool(size=size, maxsize=maxsize)
        # ensure the pool is torn down when the Boss is fini'd
        self.onfini(workers.fini)
        self.setBossPool(workers)
示例#6
0
    def test_threads_pool(self):
        '''
        A task queued on a pool should run to completion.
        '''
        pool = s_threads.Pool()
        wait = s_eventbus.Waiter(pool, 1, 'pool:work:fini')

        def addem(x, y):
            return x + y

        # queue one unit of work and wait for its completion event
        pool.task(newtask(addem, 20, 30))

        wait.wait()
        pool.fini()
示例#7
0
    def test_threads_pool_wrap(self):
        '''
        A pool-wrapped callable should run as a unit of pool work.
        '''
        pool = s_threads.Pool()
        wait = s_eventbus.Waiter(pool, 1, 'pool:work:fini')

        def addem(x, y):
            return x + y

        # invoking the wrapper dispatches the call onto the pool
        pool.wrap(addem)(20, 30)

        wait.wait()
        pool.fini()
示例#8
0
    def test_async_custom_pool_basics(self):
        # Demonstrate Boss use with a custom thread pool.
        boss = s_async.Boss()

        # caller-owned pool; jobs are dispatched onto it by hand below
        my_pool = s_threads.Pool(3, maxsize=8)

        # completed jobs land here, keyed by job name
        data = {}

        def jobmeth(x, y=20):
            # a job that completes normally
            return x + y

        def jobdork(x, y=20):
            # a job that always raises
            raise Exception('hi')

        def jobdone(job):
            # ondone callback: record the finished job tuple by name
            name = job[1].get('name')
            data[name] = job

        jid1 = s_async.jobid()
        jid2 = s_async.jobid()

        # task tuples are (callable, args, kwargs)
        task1 = (jobmeth, (3, ), {})
        task2 = (jobdork, (3, ), {})

        job1 = boss.initJob(jid1, task=task1, name='job1', ondone=jobdone)
        job2 = boss.initJob(jid2, task=task2, name='job2', ondone=jobdone)

        # a job tuple leads with its job id
        self.eq(job1[0], jid1)
        self.eq(job2[0], jid2)

        # Test __iter__ since we've got jobs in the boss that haven't been run.
        jobs = [job for job in boss]
        self.eq(len(jobs), 2)

        # run the queued jobs on the custom pool (not a boss-owned pool)
        my_pool.call(boss._runJob, job1)
        my_pool.call(boss._runJob, job2)

        boss.wait(jid1, timeout=1)
        boss.wait(jid2, timeout=1)

        ret1 = data.get('job1')

        self.nn(ret1)
        # jobmeth(3) -> 3 + 20
        self.eq(ret1[1]['ret'], 23)

        ret2 = data.get('job2')
        self.nn(ret2)
        # the raised exception is captured into the job info dict
        self.eq(ret2[1]['err'], 'Exception')
        self.eq(ret2[1]['errmsg'], 'hi')

        boss.fini()
示例#9
0
    def __init__(self, relay, plex=None, sock=None):
        '''
        Construct a telepath proxy over the given link relay.

        Args:
            relay: LinkRelay used to connect (and reconnect) to the server
            plex: optional shared socket Plex() (one is created if None)
            sock: optional pre-connected socket for the initial link
        '''
        s_eventbus.EventBus.__init__(self)
        self.onfini(self._onProxyFini)

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        #       derefs with overlapping names from working correctly

        self._tele_sid = None

        self._tele_q = s_queue.Queue()
        self._tele_pushed = {}

        if plex is None:
            plex = s_socket.Plex()

        self._tele_plex = plex
        self._tele_boss = s_async.Boss()

        self._tele_plex.on('link:sock:mesg', self._onLinkSockMesg)

        self._raw_on('tele:yield:init', self._onTeleYieldInit)
        self._raw_on('tele:yield:item', self._onTeleYieldItem)
        self._raw_on('tele:yield:fini', self._onTeleYieldFini)

        # route job completion events into the async Boss
        self._raw_on('job:done', self._tele_boss.dist)
        self._raw_on('sock:gzip', self._onSockGzip)
        self._raw_on('tele:call', self._onTeleCall)

        poolmax = relay.getLinkProp('poolmax', -1)
        poolsize = relay.getLinkProp('poolsize', 0)

        # consumer thread pulls queued messages; pool runs the handlers
        self._tele_cthr = self.consume(self._tele_q)
        self._tele_pool = s_threads.Pool(size=poolsize, maxsize=poolmax)

        self._tele_ons = {}

        self._tele_sock = None
        self._tele_relay = relay  # LinkRelay()
        self._tele_link = relay.link
        self._tele_yields = {}
        self._tele_csides = {}
        self._tele_reflect = None

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if sock is None:
            sock = self._tele_relay.connect()

        self._initTeleSock(sock=sock)
示例#10
0
    def __init__(self, relay, plex=None):
        '''
        Construct a telepath proxy over the given link relay.

        Args:
            relay: LinkRelay used to connect to the shared object
            plex: optional shared socket Plex() (one is created if None)
        '''
        s_eventbus.EventBus.__init__(self)
        self.onfini(self._onProxyFini)

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        #       derefs with overlapping names from working correctly

        self._tele_sid = None
        self._tele_pki = None

        self._tele_q = s_queue.Queue()
        self._tele_pushed = {}

        # fix: use identity comparison with None (was "plex == None")
        if plex is None:
            plex = s_socket.Plex()

        self._tele_plex = plex
        self._tele_boss = s_async.Boss()

        self._raw_on('tele:yield:init', self._onTeleYieldInit)
        self._raw_on('tele:yield:item', self._onTeleYieldItem)
        self._raw_on('tele:yield:fini', self._onTeleYieldFini)

        # route job completion events into the async Boss
        self._raw_on('job:done', self._tele_boss.dist)
        self._raw_on('tele:call', self._onTeleCall)

        poolmax = relay.getLinkProp('poolmax', -1)
        poolsize = relay.getLinkProp('poolsize', 0)

        # consumer thread pulls queued messages; pool runs the handlers
        self._tele_cthr = self.consume(self._tele_q)
        self._tele_pool = s_threads.Pool(size=poolsize, maxsize=poolmax)

        self._tele_ons = set()
        self._tele_sock = None
        self._tele_relay = relay  # LinkRelay()
        self._tele_yields = {}

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if relay.getLinkProp('pki'):

            #TODO pkiurl

            # fix: identity comparison with None (was "== None")
            self._tele_pki = relay.getLinkProp('pkistor')
            if self._tele_pki is None:
                self._tele_pki = s_pki.getUserPki()

        self._initTeleSock()
示例#11
0
    def test_threads_exception(self):
        '''
        An exception raised in pool work is logged, not propagated.
        '''
        hits = {}

        def boom():
            hits['key'] = True
            return 1 / 0

        with self.getLoggerStream('synapse.lib.threads',
                                  'error running task for') as stream:
            with s_threads.Pool() as pool:
                pool.call(boom)
                # wait for the error to appear in the log stream
                self.true(stream.wait(2))

        # the callable ran before raising
        self.true(hits.get('key'))
示例#12
0
    def __init__(self, pool=None):
        '''
        Construct the scheduler.

        Args:
            pool: optional thread Pool() for task execution
                  (a default Pool() is created when None)
        '''
        EventBus.__init__(self)

        # fall back to a default worker pool when none was supplied
        self.pool = s_threads.Pool() if pool is None else pool
        self.root = None

        self.wake = threading.Event()
        self.lock = threading.Lock()

        # kick off the main scheduler loop thread
        self.thr = self._runSchedMain()
        self.onfini(self._onSchedFini)
示例#13
0
    def test_threads_exception(self):
        '''
        An exception raised by pool work is logged, not propagated.
        '''
        ran = {}

        def boom():
            ran['key'] = True
            return 1 / 0

        with self.getLoggerStream('synapse.lib.threads') as stream:
            with s_threads.Pool() as pool:
                pool.call(boom)
                # give the worker thread a moment to run and log
                time.sleep(0.1)

        # the callable executed before raising
        self.true(ran.get('key'))

        stream.seek(0)
        self.isin('error running task for', stream.read())
示例#14
0
    def __init__(self, pool=None):
        '''
        Construct a telepath daemon.

        Args:
            pool: optional thread Pool() for message handling
                  (a default Pool(size=8, maxsize=-1) is created if None)
        '''
        EventBus.__init__(self)
        DmonConf.__init__(self)

        self.auth = None
        self.socks = {}  # sockets by iden
        self.shared = {}  # objects provided by daemon
        self.pushed = {}  # objects provided by sockets
        self.csides = {}  # item:[ (name,path), ... ]
        self.reflect = {}  # objects reflect info by name

        self._dmon_ons = {}
        self._dmon_links = []  # list of listen links
        self._dmon_yields = set()

        if pool is None:
            pool = s_threads.Pool(size=8, maxsize=-1)

        self.pool = pool
        self.plex = s_socket.Plex()
        self.cura = s_session.Curator()

        # tear down sockets, workers and sessions when the daemon finis
        self.onfini(self.plex.fini)
        self.onfini(self.pool.fini)
        self.onfini(self.cura.fini)

        self.on('link:sock:init', self._onLinkSockInit)
        self.plex.on('link:sock:mesg', self._onLinkSockMesg)

        # dispatch table for telepath protocol messages
        self.mesgfuncs = {}

        self.setMesgFunc('tele:syn', self._onTeleSynMesg)
        self.setMesgFunc('sock:gzip', self._onSockGzipMesg)

        self.setMesgFunc('tele:call', self._onTeleCallMesg)

        # for "client shared" objects...
        self.setMesgFunc('tele:push', self._onTelePushMesg)
        self.setMesgFunc('tele:retn', self._onTeleRetnMesg)

        self.setMesgFunc('tele:on', self._onTeleOnMesg)
        self.setMesgFunc('tele:off', self._onTeleOffMesg)

        self.setMesgFunc('tele:yield:fini', self._onTeleYieldFini)
示例#15
0
    def __init__(self, core=None, pool=None):
        '''
        Construct a telepath daemon.

        Args:
            core: optional cortex for PKI storage
                  (a ram:/// cortex is created if None)
            pool: optional thread Pool() for message handling
                  (a default Pool(size=8, maxsize=-1) is created if None)
        '''
        EventBus.__init__(self)
        DmonConf.__init__(self)

        # fix: use identity comparison with None (was "core == None")
        if core is None:
            core = s_cortex.openurl('ram:///')

        self.socks = {}  # sockets by iden
        self.shared = {}  # objects provided by daemon
        self.pushed = {}  # objects provided by sockets

        self._dmon_links = []  # list of listen links
        self._dmon_yields = set()

        # fix: use identity comparison with None (was "pool == None")
        if pool is None:
            pool = s_threads.Pool(size=8, maxsize=-1)

        self.pki = s_pki.PkiStor(core)

        self.pool = pool
        self.core = core
        self.plex = s_socket.Plex()

        # tear down the socket plex and worker pool on fini
        self.onfini(self.plex.fini)
        self.onfini(self.pool.fini)

        self.on('link:sock:init', self._onLinkSockInit)
        self.plex.on('link:sock:mesg', self._onLinkSockMesg)

        # dispatch table for telepath protocol messages
        self.mesgfuncs = {}

        self.setMesgFunc('tele:syn', self._onTeleSynMesg)

        self.setMesgFunc('tele:skey', self._onTeleSkeyMesg)
        self.setMesgFunc('tele:call', self._onTeleCallMesg)

        # for "client shared" objects...
        self.setMesgFunc('tele:push', self._onTelePushMesg)
        self.setMesgFunc('tele:retn', self._onTeleRetnMesg)

        self.setMesgFunc('tele:on', self._onTeleOnMesg)
        self.setMesgFunc('tele:off', self._onTeleOffMesg)

        self.setMesgFunc('tele:yield:fini', self._onTeleYieldFini)
示例#16
0
    def __init__(self, conf=None):
        '''
        Construct the socket multiplexor.

        Args:
            conf: optional config dict (supports the 'pool:max' option)
        '''
        s_config.Config.__init__(self, conf)

        # epoll based multiplexor for all managed sockets
        self.epoll = select.epoll()

        self.socks = {}  # presumably keyed by socket fileno — confirm against callers
        self.links = {}

        # poll-loop worker thread; note it starts before the pool exists
        self.thrd = s_threads.worker(self._runPollLoop)

        self.onfini(self._onPlexFini)

        # worker pool for dispatching socket events, bounded by pool:max
        pmax = self.getConfOpt('pool:max')
        self.pool = s_threads.Pool(maxsize=pmax)

        self.onfini(self.pool.fini)

        self.polls = {}
示例#17
0
    def __init__(self, core=None, opts=None, *args, **kwargs):
        '''
        Construct the web API service.

        Args:
            core: optional cortex (a ram:// cortex is created if None)
            opts: optional dict of config options applied at startup
            kwargs: may supply 'ioloop' (tornado IOLoop) and
                    'content_type_skip' (iterable of content types)
        '''
        s_config.Config.__init__(self)
        # Runtime-settable options
        self.onConfOptSet(CACHE_ENABLED, self._onSetWebCache)
        self.onConfOptSet(CACHE_TIMEOUT, self._onSetWebCacheTimeout)

        # Things we need prior to loading in conf values
        self.web_boss = s_async.Boss()
        self.web_cache = s_cache.Cache()
        self.web_cache_enabled = False

        if opts:
            self.setConfOpts(opts)

        self._web_required_keys = ('namespace', 'doc', 'apis')

        self._web_apis = {}
        self._web_namespaces = set([])
        self._web_docs = {}
        self._web_default_http_args = {
        }  # Global request headers per namespace

        # Check configable options before we spin up any more resources
        max_clients = self.getConfOpt(MAX_CLIENTS)
        pool_min = self.getConfOpt(MIN_WORKER_THREADS)
        pool_max = self.getConfOpt(MAX_WORKER_THREADS)
        # NOTE(review): the checks below enforce >= 1 and >= min, but the
        # error messages say "greater than" — consider aligning the text.
        if pool_min < 1:
            raise s_common.BadConfValu(
                name=MIN_WORKER_THREADS,
                valu=pool_min,
                mesg='web:worker:threads:min must be greater than 1')
        if pool_max < pool_min:
            raise s_common.BadConfValu(
                name=MAX_WORKER_THREADS,
                valu=pool_max,
                mesg=
                'web:worker:threads:max must be greater than the web:worker:threads:min'
            )
        if max_clients < 1:
            raise s_common.BadConfValu(
                name=MAX_CLIENTS,
                valu=max_clients,
                mesg='web:tornado:max_clients must be greater than 1')
        # Tornado Async
        loop = kwargs.get('ioloop')
        if loop is None:
            loop = t_ioloop.IOLoop()
        self.web_loop = loop
        self.web_client = t_http.AsyncHTTPClient(io_loop=self.web_loop,
                                                 max_clients=max_clients)
        # run the tornado loop in its own thread
        self.web_iothr = self._runIoLoop()

        # Synapse Async and thread pool
        self.web_pool = s_threads.Pool(pool_min, pool_max)

        # Synapse Core and ingest tracking
        if core is None:
            core = s_cortex.openurl('ram://')
            self.onfini(core.fini)
        self.web_core = core
        self._web_api_ingests = collections.defaultdict(list)
        self._web_api_gest_opens = {}

        # Setup Fini handlers
        self.onfini(self._onHypoFini)

        # List of content-type headers to skip automatic decoding
        self._web_content_type_skip = set([])
        self.webContentTypeSkipAdd('application/octet-stream')
        for ct in kwargs.get('content_type_skip', []):
            self.webContentTypeSkipAdd(ct)
示例#18
0
import threading
import multiprocessing

import synapse.lib.sched as s_sched
import synapse.lib.threads as s_threads

# module-level recursive lock shared by users of this module
lock = threading.RLock()

# go high since they will mostly be IO bound
tmax = multiprocessing.cpu_count() * 8

pool = s_threads.Pool(maxsize=tmax)
sched = s_sched.Sched(pool=pool)