Code Example #1
    def test_queue_postfini(self):
        q = s_queue.Queue()
        q.put(1)
        q.put(2)
        q.put(3)
        q.done()
        q.put(4)
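        # this put() comes after done(), so the 4 never comes back from the get/slice calls below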

        self.eq(q.get(), 1)
        self.eq(q.slice(2), [2, 3])

        self.raises(s_exc.IsFini, q.get)
        self.raises(s_exc.IsFini, q.slice, 1)

        q = s_queue.Queue()
        q.put(1)
        q.fini()
        q.put(2)
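        # fini() tears the queue down immediately; this put(2) never reaches q.deq (see the deqdata check below)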

        deqdata = []

        [deqdata.append(item) for item in q.deq]

        self.raises(s_exc.IsFini, q.get)
        self.raises(s_exc.IsFini, q.slice, 1)

        self.eq(deqdata, [1])
Code Example #2
File: test_lib_queue.py Project: e2-ibm/synapse
    def test_queue_postfini(self):
        q = s_queue.Queue()
        q.put(1)
        q.put(2)
        q.put(3)
        q.done()
        self.eq(q.get(), 1)
        self.eq(q.slice(2), [2, 3])
        self.eq(q.get(), None)
        self.eq(q.slice(1), None)

        q = s_queue.Queue()
        q.put(1)
        q.fini()
        self.eq(q.get(), None)
        self.eq(q.slice(1), None)
Code Example #3
    def test_queue_exit(self):
        q = s_queue.Queue()
        evt = threading.Event()
        data = [1, 2, 3, 4, 5]
        results = []

        @firethread
        def nommer():
            evt.wait()
            try:

                while True:
                    results.append(q.get(timeout=1))

            except s_exc.IsFini as e:
                return

        thr = nommer()
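        # leaving the with-block calls q.fini(), which breaks the worker out of its loop by raising IsFini from get()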
        with q:
            [q.put(item) for item in data]
            evt.set()

        thr.join()

        self.true(q.isfini)
        self.eq(data, results)
Code Example #4
    def test_queue_multislice(self):
        # run a queue for several items with a timeout.
        q = s_queue.Queue()
        retn = []

        q.put(1)
        q.put(2)
        q.put(3)
        q.put(4)
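        # slices(2) yields the queued items in pairs, then raises TimeOut once the queue stays empty for the 0.1s timeout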

        try:

            for slic in q.slices(2, timeout=0.1):
                retn.append(tuple(slic))

        except s_exc.TimeOut as e:
            pass

        q.put(1)
        q.put(2)
        q.put(3)
        q.put(4)

        try:

            for slic in q.slices(2, timeout=0.1):
                retn.append(tuple(slic))

        except s_exc.TimeOut as e:
            pass

        self.eq(tuple(retn), ((1, 2), (3, 4), (1, 2), (3, 4)))
Code Example #5
    def __init__(self, item):
        self._mono_item = item
        self._mono_thrq = s_queue.Queue()
        self._mono_thrd = worker(self._runMonoThread)

        if isinstance(item, EventBus):
            item.onfini(self._onMonoFini)
Code Example #6
File: test_lib_queue.py Project: e2-ibm/synapse
    def test_queue_base(self):
        q = s_queue.Queue()
        q.put('woot')

        self.eq(q.get(), 'woot')
        self.none(q.get(timeout=0.1))

        q.fini()
Code Example #7
File: test_queue.py Project: williballenthin/synapse
    def test_queue_base(self):
        q = s_queue.Queue()
        q.put('woot')

        self.assertEqual(q.get(), 'woot')
        self.assertIsNone(q.get(timeout=0.1))

        q.fini()
Code Example #8
File: test_lib_queue.py Project: e2-ibm/synapse
    def test_queue_timeout(self):
        q = s_queue.Queue()
        q.put(1)
        self.eq(q.slice(1, timeout=0.001), [1])
        self.eq(q.slice(1, timeout=0.001), None)
        q.put(1)
        self.eq(q.get(timeout=0.001), 1)
        self.eq(q.get(timeout=0.001), None)
Code Example #9
    def __init__(self, relay, plex=None, sock=None):

        s_eventbus.EventBus.__init__(self)
        self.onfini(self._onProxyFini)

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        #       derefs with overlapping names from working correctly

        self._tele_sid = None

        self._tele_q = s_queue.Queue()
        self._tele_pushed = {}

        # allow the server to give us events to fire back on
        # reconnect so we can help re-init our server-side state
        self._tele_reminders = []

        if plex is None:
            plex = s_socket.Plex()
            self.onfini(plex.fini)

        self._tele_plex = plex
        self._tele_boss = s_async.Boss()

        self._tele_plex.on('link:sock:mesg', self._onLinkSockMesg)

        self._raw_on('tele:yield:init', self._onTeleYieldInit)
        self._raw_on('tele:yield:item', self._onTeleYieldItem)
        self._raw_on('tele:yield:fini', self._onTeleYieldFini)

        self._raw_on('tele:reminder', self._onTeleReminder)

        self._raw_on('fifo:xmit', self._onFifoXmit)
        self._raw_on('job:done', self._tele_boss.dist)
        self._raw_on('sock:gzip', self._onSockGzip)
        self._raw_on('tele:call', self._onTeleCall)
        self._raw_on('tele:sock:init', self._onTeleSockInit)

        self._tele_cthr = self.consume(self._tele_q)

        self._tele_ons = {}

        self._tele_sock = None
        self._tele_relay = relay  # LinkRelay()
        self._tele_link = relay.link
        self._tele_yields = {}
        self._tele_csides = {}
        self._tele_reflect = None

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if sock is None:
            sock = self._tele_relay.connect()

        self._initTeleSock(sock=sock)
Code Example #10
File: db.py Project: thpatel/synapse
    def __init__(self, size, ctor):
        s_eventbus.EventBus.__init__(self)

        self.size = size
        self.iden = s_common.guid()

        self.dbque = s_queue.Queue()
        self.onfini(self.dbque.fini)

        self.wlock = threading.Lock()
        self.xacts = [self._initXact(ctor()) for i in range(size)]
Code Example #11
    def test_queue_timeout(self):
        q = s_queue.Queue()
        q.put(1)

        self.eq(q.slice(1, timeout=0.001), [1])
        self.raises(s_exc.TimeOut, q.slice, 1, timeout=0.001)

        q.put(1)

        self.eq(q.get(timeout=0.001), 1)
        self.raises(s_exc.TimeOut, q.slice, 1, timeout=0.001)
Code Example #12
    def _onTeleYieldInit(self, mesg):
        jid = mesg[1].get('jid')
        iden = mesg[1].get('iden')

        que = s_queue.Queue()
        self._tele_yields[iden] = que

        def onfini():
            self._tele_yields.pop(iden,None)
            self._txTeleSock('tele:yield:fini', iden=iden)

        que.onfini(onfini)
        self._tele_boss.done(jid,que)
Code Example #13
File: telepath.py Project: e2-ibm/synapse
    def __init__(self, relay, plex=None, sock=None):

        s_eventbus.EventBus.__init__(self)
        self.onfini(self._onProxyFini)

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        #       derefs with overlapping names from working correctly

        self._tele_sid = None

        self._tele_q = s_queue.Queue()
        self._tele_pushed = {}

        if plex is None:
            plex = s_socket.Plex()

        self._tele_plex = plex
        self._tele_boss = s_async.Boss()

        self._tele_plex.on('link:sock:mesg', self._onLinkSockMesg)

        self._raw_on('tele:yield:init', self._onTeleYieldInit)
        self._raw_on('tele:yield:item', self._onTeleYieldItem)
        self._raw_on('tele:yield:fini', self._onTeleYieldFini)

        self._raw_on('job:done', self._tele_boss.dist)
        self._raw_on('sock:gzip', self._onSockGzip)
        self._raw_on('tele:call', self._onTeleCall)

        poolmax = relay.getLinkProp('poolmax', -1)
        poolsize = relay.getLinkProp('poolsize', 0)

        self._tele_cthr = self.consume(self._tele_q)
        self._tele_pool = s_threads.Pool(size=poolsize, maxsize=poolmax)

        self._tele_ons = {}

        self._tele_sock = None
        self._tele_relay = relay  # LinkRelay()
        self._tele_link = relay.link
        self._tele_yields = {}
        self._tele_csides = {}
        self._tele_reflect = None

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if sock is None:
            sock = self._tele_relay.connect()

        self._initTeleSock(sock=sock)
Code Example #14
    def __init__(self, relay, plex=None):

        s_eventbus.EventBus.__init__(self)
        self.onfini(self._onProxyFini)

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        #       derefs with overlapping names from working correctly

        self._tele_sid = None
        self._tele_pki = None

        self._tele_q = s_queue.Queue()
        self._tele_pushed = {}

        if plex == None:
            plex = s_socket.Plex()

        self._tele_plex = plex
        self._tele_boss = s_async.Boss()

        self._raw_on('tele:yield:init', self._onTeleYieldInit)
        self._raw_on('tele:yield:item', self._onTeleYieldItem)
        self._raw_on('tele:yield:fini', self._onTeleYieldFini)

        self._raw_on('job:done', self._tele_boss.dist)
        self._raw_on('tele:call', self._onTeleCall)

        poolmax = relay.getLinkProp('poolmax', -1)
        poolsize = relay.getLinkProp('poolsize', 0)

        self._tele_cthr = self.consume(self._tele_q)
        self._tele_pool = s_threads.Pool(size=poolsize, maxsize=poolmax)

        self._tele_ons = set()
        self._tele_sock = None
        self._tele_relay = relay  # LinkRelay()
        self._tele_yields = {}

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if relay.getLinkProp('pki'):

            #TODO pkiurl

            self._tele_pki = relay.getLinkProp('pkistor')
            if self._tele_pki == None:
                self._tele_pki = s_pki.getUserPki()

        self._initTeleSock()
Code Example #15
    def test_queue_slice(self):
        q = s_queue.Queue()

        q.put(1)
        q.put(2)
        q.put(3)
        q.put(4)

        q.done()
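        # with done() already called, slices(2) drains the remaining items in pairs and then exits cleanly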

        retn = []

        for slic in q.slices(2):
            retn.append(tuple(slic))

        self.eq(tuple(retn), ((1, 2), (3, 4)))
Code Example #16
File: net.py Project: reign-git/synapse
    def setq(self):
        '''
        Set this Chan to use a Queue for rx.
        '''

        self._chan_rxq = s_queue.Queue()

        def rx(link, mesg):
            self._chan_rxq.put(mesg)

        def rxfini(mesg):
            self._chan_rxq.done()

        self.onrx(rx)

        self.on('rx:fini', rxfini)
        self.onfini(self._chan_rxq.done)
Code Example #17
    def test_queue_base(self):
        q = s_queue.Queue()

        self.len(0, q)
        self.eq(q.size(), 0)

        q.put('woot')

        self.len(1, q)
        self.eq(q.size(), 1)

        self.eq(q.get(), 'woot')
        self.raises(s_exc.TimeOut, q.get, timeout=0.1)

        self.len(0, q)
        self.eq(q.size(), 0)

        q.fini()
Code Example #18
    def test_queue_base(self):
        q = s_queue.Queue()

        self.len(0, q)
        self.eq(q.size(), 0)

        q.put('woot')

        self.len(1, q)
        self.eq(q.size(), 1)

        self.eq(q.get(), 'woot')
        self.none(q.get(timeout=0.1))

        self.len(0, q)
        self.eq(q.size(), 0)

        q.fini()
Code Example #19
File: cryotank.py Project: rjammala/synapse
    def __init__(self, cryotank: CryoTank) -> None:
        '''
        Create an indexer

        Args:
            cryotank: the cryotank to index
        Returns:
            None
        '''
        self.cryotank = cryotank
        ebus = cryotank
        self._worker = threading.Thread(target=self._workerloop,
                                        name='CryoTankIndexer')
        path = s_common.gendir(cryotank.dirn, 'cryo_index.lmdb')
        cryotank_map_size = cryotank.lenv.info()['map_size']
        self._dbenv = lmdb.open(path,
                                writemap=True,
                                metasync=False,
                                max_readers=8,
                                max_dbs=4,
                                map_size=cryotank_map_size)
        # iid, v -> offset table
        self._idxtbl = self._dbenv.open_db(b'indices', dupsort=True)
        # offset, iid -> normalized prop
        self._normtbl = self._dbenv.open_db(b'norms')
        self._to_delete = {}  # type: Dict[str, int]
        self._workq = s_queue.Queue()
        # A dict of propname -> MetaEntry
        self.model = s_datamodel.Model()
        self._meta = _IndexMeta(self.model, self._dbenv)
        self._next_offset = self._meta.lowestProgress()
        self._chunk_sz = 1000  # < How many records to read at a time
        self._remove_chunk_sz = 1000  # < How many index entries to remove at a time
        ebus.on('cryotank:puts', self._onData)

        self._worker.start()

        def _onfini():
            self._workq.done()
            self._worker.join(self.MAX_WAIT_S)
            self._dbenv.close()

        ebus.onfini(_onfini)
Code Example #20
    def __init__(self, size=3, maxsize=None):
        EventBus.__init__(self)

        self.workq = s_queue.Queue()

        self._pool_lock = threading.Lock()
        self._pool_avail = 0

        if maxsize is None:
            maxsize = size

        self._pool_maxsize = maxsize

        self._pool_threads = {}

        self.onfini(self._onPoolFini)

        for i in range(size):
            self._fire_thread(self._run_work)
Code Example #21
    def test_queue_iter(self):
        results = []
        data = [1, 2, 3, 4, 5]
        evt = threading.Event()

        q = s_queue.Queue()
        [q.put(item) for item in data]

        @firethread
        def finisoon():
            evt.wait()
            q.fini()

        thr = finisoon()
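        # the queue iterator keeps yielding until fini(); the helper thread finis it once every item has been consumed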
        for i, item in enumerate(q, 1):
            results.append(item)
            if i == len(data):
                evt.set()
        thr.join()

        self.true(q.isfini)
        self.eq(data, results)
Code Example #22
    def test_queue_getn(self):
        q = s_queue.Queue()
        data = [0, 1, 2, 3, 4]

        [q.put(d) for d in data]
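        # getn() returns (ok, value) tuples instead of raising; on failure the payload leads with the error name ('TimeOut', 'IsFini')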

        for i in range(5):
            retn = q.getn()
            self.true(retn[0])
            self.eq(retn[1], i)

        retn = q.getn(0.01)
        self.false(retn[0])
        self.eq(retn[1][0], 'TimeOut')

        q.done()
        retn = q.getn(0.01)
        self.false(retn[0])
        self.eq(retn[1][0], 'IsFini')
        self.true(q.isfini)

        retn = q.getn(0.01)
        self.false(retn[0])
        self.eq(retn[1][0], 'IsFini')
Code Example #23
    def test_queue_exit(self):
        q = s_queue.Queue()
        evt = threading.Event()
        data = [1, 2, 3, 4, 5]
        results = []

        @firethread
        def nommer():
            evt.wait()
            while True:
                obj = q.get(timeout=1)
                if obj is not None:
                    results.append(obj)
                else:
                    break

        thr = nommer()
        with q:
            [q.put(item) for item in data]
            evt.set()
        thr.join()

        self.true(q.isfini)
        self.eq(data, results)
Code Example #24
    def openLogFd(self, opts):
        opath = self.locs.get('log:fp')
        if opath:
            self.printf('Must call --off to disable current file before starting a new file.')
            return
        fmt = opts.get('format')
        path = opts.get('path')
        splice_only = opts.get('splices-only')
        if not path:
            ts = s_time.repr(s_common.now(), True)
            fn = f'storm_{ts}.{fmt}'
            path = s_common.getSynPath('stormlogs', fn)
        self.printf(f'Starting logfile at [{path}]')
        queue = s_queue.Queue()
        fd = s_common.genfile(path)
        # Seek to the end of the file. Allows a user to append to a file.
        fd.seek(0, 2)
        self.locs['log:fp'] = path
        self.locs['log:fd'] = fd
        self.locs['log:fmt'] = fmt
        self.locs['log:queue'] = queue
        self.locs['log:thr'] = self.queueLoop()
        self.locs['log:splicesonly'] = splice_only
        self._cmd_cli.on('storm:mesg', self.onStormMesg)
Code Example #25
File: persist.py Project: mari0d/synapse
    def items(self, off):
        '''
        Yield (nextoff,object) tuples from the file backlog and real-time
        once caught up.

        Args:
            off (int): Starting offset to use when unpacking objects from the
                       Dir object.

        Examples:
            Iterate over the items in a file and do stuff with them::

                for noff, item in pers.items(0):
                    dostuff(item)

        Notes:
            This is a legitimate yield generator; it may not be used across
            a Telepath Proxy.

            The offset yielded by this is a relative offset, computed from
            the base of the persist file and the input offset.  It should not
            be considered an absolute offset value.

        Yields:
            ((int, object)): A tuple containing the relative offset of the
             unpacked object and the unpacked object itself.
        '''
        que = s_queue.Queue()
        unpk = msgpack.Unpacker(use_list=0, encoding='utf8')

        if self.files[0].opts.get('baseoff') > off:
            raise Exception('Too Far Back')  # FIXME

        # a bit of a hack to get lengths from msgpack Unpacker
        data = {'next': 0}

        def calcsize(b):
            data['next'] += len(b)

        for pers in self.files:

            base = pers.opts.get('baseoff')

            # do we skip this file?
            filemax = base + pers.size
            if filemax < off:
                continue

            while True:

                foff = off - base

                byts = pers.readoff(foff, blocksize)

                # file has been closed...
                if byts is None:
                    return

                # check if we're at the edge
                if not byts:

                    with self.lock:

                        # newp! ( break out to next file )
                        if self.last != pers:
                            break

                        # if there are byts now, we whiffed
                        # the check/set race.  Go around again.
                        byts = pers.readoff(foff, blocksize)
                        if byts is None:
                            return

                        if not byts:
                            self.queues.append(que)
                            break

                unpk.feed(byts)

                try:

                    while True:
                        item = unpk.unpack(write_bytes=calcsize)
                        yield data['next'], item

                except msgpack.exceptions.OutOfData:
                    pass

                off += len(byts)

        # we are now a queued real-time pump
        try:

            # this will break out on fini...
            for x in que:
                yield x

        finally:
            self.queues.remove(que)
            que.fini()
Code Example #26
File: persist.py Project: TobiNakamura/synapse
    def items(self, off):
        '''
        Yield (nextoff,item) tuples from the file backlog and real-time
        once caught up.

        NOTE: because this is a legitimate yield generator it may not be
              used across a telepath proxy.

        Example:

            for noff,item in pers.items(0):
                stuff(item)

        '''
        que = s_queue.Queue()
        unpk = msgpack.Unpacker(use_list=0, encoding='utf8')

        if self.files[0].opts.get('baseoff') > off:
            raise Exception('Too Far Back') # FIXME

        # a bit of a hack to get lengths from msgpack Unpacker
        data = {'next': 0}
        def calcsize(b):
            data['next'] += len(b)

        for pers in self.files:

            base = pers.opts.get('baseoff')

            # do we skip this file?
            filemax = base + pers.size
            if filemax < off:
                continue

            while True:

                foff = off - base

                byts = pers.readoff(foff, blocksize)

                # file has been closed...
                if byts is None:
                    return

                # check if we're at the edge
                if not byts:

                    with self.lock:

                        # newp! ( break out to next file )
                        if self.last != pers:
                            break

                        # if there are byts now, we whiffed
                        # the check/set race.  Go around again.
                        byts = pers.readoff(foff, blocksize)
                        if byts is None:
                            return

                        if not byts:
                            self.queues.append(que)
                            break

                unpk.feed(byts)

                try:

                    while True:
                        item = unpk.unpack(write_bytes=calcsize)
                        yield data['next'], item

                except msgpack.exceptions.OutOfData:
                    pass

                off += len(byts)

        # we are now a queued real-time pump
        try:

            # this will break out on fini...
            for x in que:
                yield x

        finally:
            self.queues.remove(que)
            que.fini()