Code example #1
    def xreader():
        in_queue = Queue(buffer_size)
        out_queue = Queue(buffer_size)
        out_order = [0]
        # start a read worker in a thread
        target = order_read_worker if order else read_worker
        t = Thread(target=target, args=(reader, in_queue))
        t.daemon = True
        t.start()
        # start several handle_workers
        target = order_handle_worker if order else handle_worker
        args = (in_queue, out_queue, mapper,
                out_order) if order else (in_queue, out_queue, mapper)
        workers = []
        for i in range(process_num):
            worker = Thread(target=target, args=args)
            worker.daemon = True
            workers.append(worker)
        for w in workers:
            w.start()

        sample = out_queue.get()
        while not isinstance(sample, XmapEndSignal):
            yield sample
            sample = out_queue.get()
        finish = 1
        while finish < process_num:
            sample = out_queue.get()
            if isinstance(sample, XmapEndSignal):
                finish += 1
            else:
                yield sample
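
The example above is a fragment: the worker functions, XmapEndSignal, and parameters such as buffer_size, order and process_num are defined elsewhere in the enclosing reader decorator. The core idea is a queue-based pipeline terminated by sentinel objects, one per worker thread. A minimal, self-contained sketch of that pattern follows; all names in it (threaded_map, EndSignal, and so on) are illustrative, not from the original project.

from queue import Queue
from threading import Thread

class EndSignal(object):
    """Sentinel marking the end of the stream."""

def threaded_map(reader, mapper, workers=2, buffer_size=8):
    in_queue = Queue(buffer_size)
    out_queue = Queue(buffer_size)

    def read_worker():
        for sample in reader():
            in_queue.put(sample)
        # One sentinel per mapper thread, so each of them can stop.
        for _ in range(workers):
            in_queue.put(EndSignal())

    def handle_worker():
        sample = in_queue.get()
        while not isinstance(sample, EndSignal):
            out_queue.put(mapper(sample))
            sample = in_queue.get()
        # Tell the consumer that this worker is done.
        out_queue.put(EndSignal())

    Thread(target=read_worker, daemon=True).start()
    for _ in range(workers):
        Thread(target=handle_worker, daemon=True).start()

    finished = 0
    while finished < workers:
        sample = out_queue.get()
        if isinstance(sample, EndSignal):
            finished += 1
        else:
            yield sample

if __name__ == '__main__':
    print(sorted(threaded_map(lambda: range(10), lambda x: x * x)))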
Code example #2
File: model.py Project: yili9111/nextgisweb
    def put_tile(self, tile, img):
        params = dict(tile=tile,
                      img=None if img is None else img.copy(),
                      uuid=self.uuid.hex,
                      db_path=self.tilestor_path)

        writer = TilestorWriter.getInstance()

        if self.async_writing:
            answer_queue = Queue(maxsize=1)
            params['answer_queue'] = answer_queue

        try:
            writer.put(params)
        except TileWriterQueueException as exc:
            _logger.error(
                "Failed to put tile {} to tile cache for resource {}. {}".
                format(params['tile'], self.resource_id, exc.message),
                exc_info=True)

        if self.async_writing:
            try:
                answer_queue.get()
            except Exception:
                pass
Code example #3
def test_blocked_cancel(any_executor, request):
    if "sync" in request.node.name:
        skip("test not applicable with sync executor")

    to_fn = Queue(1)
    from_fn = Queue(1)

    def fn():
        to_fn.get()
        from_fn.put(None)
        to_fn.get()
        return 123

    future = any_executor.submit(fn)

    # Wait until fn is certainly running
    to_fn.put(None)
    from_fn.get()

    # Since the function is in progress,
    # it should NOT be possible to cancel it
    assert_that(not future.cancel(), str(future))

    # assert_soon since, by blocking on from_fn.get(), we only guarantee
    # that the future is running from the innermost executor/future's point
    # of view, but this may not have propagated to the outermost future yet
    assert_soon(lambda: assert_that(future.running(), str(future)))

    # Re-check cancel after running() is true
    assert_that(not future.cancel(), str(future))

    # Let fn proceed and the future should be able to complete
    to_fn.put(None)
    assert_that(future.result(TIMEOUT), equal_to(123))
Code example #4
class ThreadManager(six.with_metaclass(Singleton, object)):
    """ ThreadManager provides thread on demand """

    NUM_THREAD = 4  # Default number of threads

    def __init__(self, num_thread=NUM_THREAD):
        """ Create num_thread Threads """

        self.queue = Queue()

        self.thread_list = []

        for i in range(num_thread):
            t = Thread(target=worker, args=(self.queue, ))
            t.setDaemon(True)
            t.start()

            self.thread_list.append(t)

    def add_task(self, func, params):
        """
        Add a task to perform
        :param func: function to call
        :param params : tuple of parameters
        """

        self.queue.put((func, params))

    def clear(self):
        """ clear pending task """

        while (not self.queue.empty()):
            self.queue.get()
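
ThreadManager above depends on a module-level worker function (and a Singleton metaclass) that are not shown. The snippet below is only a plausible sketch of such a worker loop, inferred from the (func, params) tuples that add_task() enqueues; the original implementation may differ.

from queue import Queue
from threading import Thread

def worker(queue):
    """Consume (func, params) tuples from the queue forever."""
    while True:
        func, params = queue.get()
        try:
            func(*params)
        except Exception:
            pass  # a real implementation would log the failure
        finally:
            queue.task_done()

if __name__ == '__main__':
    q = Queue()
    for _ in range(2):
        Thread(target=worker, args=(q,), daemon=True).start()
    q.put((print, ('hello from a worker thread',)))
    q.join()  # wait until every queued task has been processed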
Code example #5
def test(add_after_start, generator):
    queue = Queue()
    e = {}
    em = element_updater.ElementUpdater()

    async def fn(element):
        element.setdefault('called', []).append(True)
        queue.put(True)

    count = 1

    if generator:
        count = 2
        old_fn = fn

        async def fn(element):
            for _ in range(2):
                await old_fn(element)
                yield

    updater = element_updater.Updater(element=e, updater=fn)
    if add_after_start:
        em.start()
        em.add(updater)
    else:
        em.add(updater)
        em.start()

    for _ in range(count):
        queue.get(timeout=3)
    assert e['called'] == [True] * count
Code example #6
def generator_to_async_generator(get_iterable):
    """
    Turn a generator or iterable into an async generator.

    This works by running the generator in a background thread.
    The new async generator will yield both `Future` objects as well
    as the original items.

    :param get_iterable: Function that returns a generator or iterable when
        called.
    """
    q = Queue()
    f = Future()
    l = RLock()
    quitting = False

    def runner():
        """
        Consume the generator in background thread.
        When items are received, they'll be pushed to the queue and the
        Future is set.
        """
        for item in get_iterable():
            with l:
                q.put(item)
                if not f.done():
                    f.set_result(None)

            # When this async generator was cancelled (closed), stop this
            # thread.
            if quitting:
                break
        with l:
            if not f.done():
                f.set_result(None)

    # Start background thread.
    done_f = run_in_executor(runner, _daemon=True)

    try:
        while not done_f.done():
            # Wait for next item(s): yield Future.
            yield From(f)

            # Items received. Yield all items so far.
            with l:
                while not q.empty():
                    yield AsyncGeneratorItem(q.get())

                f = Future()

        # Yield final items.
        while not q.empty():
            yield q.get()

    finally:
        # When this async generator is closed (GeneratorExit exception), stop
        # the background thread as well - we don't need it anymore.
        quitting = True
Code example #8
File: reactor.py Project: testing-cabal/txfixtures
    def _maybeFixReactorThreadRace(self):  # pragma: no cover
        # XXX For some obscure reason, this is needed in order to have the
        #     reactor properly wait for the shutdown sequence. It's probably
        #     a race between this thread and the reactor thread. Needs
        #     investigation.
        spin = Queue()
        self.reactor.callFromThread(self.reactor.callLater, 0, spin.put, None)
        spin.get(timeout=self.timeout)
Code example #9
def run_load_stats(state, *args, **kwargs):
    """ Run load_stats() in a thread """
    queue = Queue()
    thread_args = [state, queue]
    thread_args.extend(args)
    thread = Thread(target=load_stats, args=thread_args, kwargs=kwargs)
    thread.daemon = True
    thread.start()
    # Wait until the thread is definitely started
    queue.get(True)
    return queue
Code example #10
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(
                status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(
                status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    def wait_for_connect(self):
        if not self.connected_event.is_set():
            self.connected_event.wait()
        else:
            raise Exception("the instance is already connected")

    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            self.disconnected_event.wait()
        else:
            raise Exception("the instance is already disconnected")

    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.message_queue.get()
            self.message_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue

    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.presence_queue.get()
            self.presence_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue
Code example #11
    def data_reader():
        r = reader()
        q = Queue(maxsize=size)
        t = Thread(target=read_worker, args=(r, q))
        t.daemon = True
        t.start()
        e = q.get()
        while e != end:
            yield e
            e = q.get()
Code example #12
File: pubnub.py Project: pubnub/python
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    def wait_for_connect(self):
        if not self.connected_event.is_set():
            self.connected_event.wait()
        else:
            raise Exception("the instance is already connected")

    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            self.disconnected_event.wait()
        else:
            raise Exception("the instance is already disconnected")

    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.message_queue.get()
            self.message_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue

    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.presence_queue.get()
            self.presence_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue
Code example #13
class WinPty(object):
    def __init__(self, stdin):
        self._s = stdin
        self._q = Queue()

        def _read_next_char(stdin, queue):
            while True:
                char = stdin.read(1)  # potentially blocking read
                if char:
                    queue.put(char)
                else:
                    break

        self._t = Thread(target=_read_next_char, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()  # read characters asynchronously from stdin

    def read(self, blksize=-1, timeout=1):
        buf = StringIO()
        count = 0
        try:
            while count < blksize or blksize == -1:
                next = self._q.get(block=timeout is not None, timeout=timeout)
                buf.write(next)
                count = count + 1
        except Empty:
            pass
        return buf.getvalue()
Code example #14
File: conftest.py Project: Zeto-Ltd/replisome
class Called(object):
    def __init__(self, obj, attr, request):
        self.obj = obj
        self.attr = attr
        self.request = request
        self._queue = Queue()

        self.orig = getattr(self.obj, self.attr)
        setattr(self.obj, self.attr, self._call)

    def restore(self):
        if hasattr(self, "orig"):
            setattr(self.obj, self.attr, self.orig)

    def _call(self, *args, **kwargs):
        try:
            rv = self.orig(*args, **kwargs)
        except Exception as e:
            self._queue.put((args, kwargs, e))
            raise
        else:
            self._queue.put((args, kwargs, rv))

        return rv

    def get(self, timeout=1):
        assert timeout
        try:
            rv = self._queue.get(timeout=timeout)
        except Empty:
            pytest.fail("no item received within %s seconds" % timeout)

        if isinstance(rv[2], Exception):
            raise rv[2]

        return rv
Code example #15
class FrameSaver( threading.Thread ):
	def __init__( self ):
		threading.Thread.__init__( self )
		self.daemon = True
		self.name = 'FrameSaver'
		self.reset()
	
	def reset( self ):
		self.queue = Queue()
	
	def run( self ):
		self.reset()
		while 1:
			message = self.queue.get()
			if   message[0] == 'Save':
				cmd, fileName, bib, t, frame = message
				#sys.stderr.write( 'save' )
				PhotoFinish.SavePhoto( fileName, bib, t, frame )
				self.queue.task_done()
			elif message[0] == 'Terminate':
				self.queue.task_done()
				self.reset()
				break
	
	def stop( self ):
		self.queue.put( ['Terminate'] )
		self.join()
	
	def save( self, fileName, bib, t, frame ):
		self.queue.put( ['Save', fileName, bib, t, frame] )
Code example #16
File: util.py Project: xeddmc/py-ipv8
def blockingCallFromThread(reactor, f, *args, **kwargs):
    """
    Improved version of twisted's blockingCallFromThread that shows the complete
    stacktrace when an exception is raised on the reactor's thread.
    If being called from the reactor thread already, just return the result of execution of the callable.
    """
    if isInIOThread():
        return f(*args, **kwargs)
    else:
        queue = Queue()

        def _callFromThread():
            result = defer.maybeDeferred(f, *args, **kwargs)
            result.addBoth(queue.put)

        reactor.callFromThread(_callFromThread)
        result = queue.get()
        if isinstance(result, Failure):
            other_thread_tb = traceback.extract_tb(result.getTracebackObject())
            this_thread_tb = traceback.extract_stack()
            logger.error(
                "Exception raised on the reactor's thread %s: \"%s\".\n Traceback from this thread:\n%s\n"
                " Traceback from the reactor's thread:\n %s",
                result.type.__name__, result.getErrorMessage(),
                ''.join(traceback.format_list(this_thread_tb)),
                ''.join(traceback.format_list(other_thread_tb)))
            result.raiseException()
        return result
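
Stripped of the Twisted-specific pieces, blockingCallFromThread boils down to scheduling work on another thread and blocking the caller on a one-slot queue until the result or the failure comes back. Below is a self-contained sketch of just that hand-off; call_in_thread is a made-up name, not part of py-ipv8.

from queue import Queue
from threading import Thread

def call_in_thread(func, *args, **kwargs):
    """Run func on a worker thread and block until its result is available."""
    result_queue = Queue(maxsize=1)

    def runner():
        try:
            result_queue.put((True, func(*args, **kwargs)))
        except Exception as exc:
            result_queue.put((False, exc))

    Thread(target=runner, daemon=True).start()
    ok, value = result_queue.get()  # blocks the calling thread
    if not ok:
        raise value
    return value

if __name__ == '__main__':
    print(call_in_thread(pow, 2, 10))  # -> 1024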
Code example #17
class Consumer(object):
    """ A consumer is registered to the read thread before a command is issued.  If an output
    matches the consumer, the output will unblock the get() caller. """
    def __init__(self, cmd, cid):
        self.cmd = cmd  # type: MasterCommandSpec
        self.cid = cid  # type: int
        self.__queue = Queue()

    def get_prefix(self):
        """ Get the prefix of the answer from the master. """
        return self.cmd.output_action + bytearray([self.cid])

    def consume(self, data, partial_result):
        # type: (bytearray, Optional[Result]) -> Tuple[int, Result, bool]
        """ Consume data. """
        return self.cmd.consume_output(data, partial_result)

    def get(self, timeout):
        """
        Wait until the master replies or the timeout expires.

        :param timeout: timeout in seconds
        :raises: :class`CommunicationTimedOutException` if master did not respond in time
        :returns: dict containing the output fields of the command
        """
        try:
            return self.__queue.get(timeout=timeout)
        except Empty:
            raise CommunicationTimedOutException()

    def deliver(self, output):
        # type: (Result) -> None
        """ Deliver output to the thread waiting on get(). """
        self.__queue.put(output)
Code example #18
File: test_equipped.py Project: wolfv/workerpool
    def test_equipped(self):
        """
        Created equipped worker that will use an internal Counter resource to
        keep track of the job count.
        """
        results = Queue()

        def toolbox_factory():
            return Counter()

        def worker_factory(job_queue):
            return workerpool.EquippedWorker(job_queue, toolbox_factory)

        pool = workerpool.WorkerPool(1, worker_factory=worker_factory)

        # Run 10 jobs
        for i in range(10):
            j = CountJob(results)
            pool.put(j)

        # Get 10 results
        for i in range(10):
            r = results.get()
            # Each result should be an incremented value
            self.assertEquals(r, i)

        pool.shutdown()
Code example #19
File: handler.py Project: swift-nav/libsbp
    class _SBPQueueIterator(six.Iterator):
        """
        Class for upstream iterators.  Implements callable interface for adding
        messages into the queue, and iterable interface for getting them out.
        """

        def __init__(self, maxsize):
            self._queue = Queue(maxsize)
            self._broken = False

        def __iter__(self):
            return self

        def __call__(self, msg, **metadata):
            self._queue.put((msg, metadata), False)

        def breakiter(self):
            self._broken = True
            self._queue.put(None, True, 1.0)

        def __next__(self):
            if self._broken and self._queue.empty():
                raise StopIteration
            m = self._queue.get(True)
            if self._broken and m is None:
                raise StopIteration
            return m
Code example #20
File: dbg_gdb.py Project: Stenean/voltron
        def inner(self, *args, **kwargs):
            if self.use_post_event:
                # create ephemeral queue
                q = Queue()

                # create an invocation that calls the decorated function
                class Invocation(object):
                    def __call__(killme):
                        # when the invocation is called, we call the function and stick the result into the queue
                        try:
                            res = func(self, *args, **kwargs)
                        except Exception as e:
                            # if we got an exception, just queue that instead
                            res = e
                        q.put(res)

                # post this invocation to be called on the main thread at the next opportunity
                gdb.post_event(Invocation())

                # now we wait until there's something in the queue, which indicates that the invocation has run and return
                # the result that was pushed onto the queue by the invocation
                res = q.get()

                # if we got an exception back from the posted event, raise it
                if isinstance(res, Exception):
                    raise res

                return res
            else:
                return func(self, *args, **kwargs)
Code example #21
File: iterators.py Project: 4Catalyzer/nolearn_utils
def make_buffer_for_iterator_with_thread(gen, n_workers, buffer_size):
    wait_time = 0.02
    generator_queue = Queue()
    _stop = threading.Event()

    def generator_task():
        while not _stop.is_set():
            try:
                if generator_queue.qsize() < buffer_size:
                    generator_output = next(gen)
                    generator_queue.put(generator_output)
                else:
                    time.sleep(wait_time)
            except (StopIteration, KeyboardInterrupt):
                _stop.set()
                return

    generator_threads = [threading.Thread(target=generator_task) for _ in range(n_workers)]
    for thread in generator_threads:
        thread.start()

    while not _stop.is_set() or not generator_queue.empty():
        if not generator_queue.empty():
            yield generator_queue.get()
        else:
            time.sleep(wait_time)
Code example #22
File: future.py Project: tamland/python-actors
class Future(object):
    def __init__(self):
        self._result = Queue(maxsize=1)
        self._success_callback = None
        self._failure_callback = None

    def get(self, timeout=None):
        """
        Return value on success, or raise exception on failure.
        """
        result = None
        try:
            result = self._result.get(True, timeout=timeout)
        except Empty:
            raise Timeout()

        if isinstance(result, Failure):
            six.reraise(*result.exc_info)
        else:
            return result

    def on_success(self, callback):
        self._success_callback = callback

    def on_failure(self, callback):
        self._failure_callback = callback
Code example #23
File: executor.py Project: tamland/python-actors
class Executor(object):
    _INTERRUPT = object()

    def __init__(self, num_workers=1):
        super(Executor, self).__init__()
        self._queue = Queue()
        self._workers = []

        for _ in range(num_workers):
            th = Thread(target=self._work)
            th.start()
            self._workers.append(th)

    def submit(self, task):
        self._queue.put(task)

    def shutdown(self):
        for _ in self._workers:
            self._queue.put(self._INTERRUPT)

    def join(self):
        for worker in self._workers:
            worker.join()

    def _work(self):
        while True:
            task = self._queue.get(block=True)
            if task is self._INTERRUPT:
                break
            try:
                task()
            except BaseException as e:
                logger.exception(e)
Code example #24
    def acquireResource(self, namespace, name, lockType, timeout=None):
        """
        Acquire a resource synchronously.

        :returns: a reference to the resource.
        """
        if timeout is not None:
            try:
                timeout = int(timeout)
            except ValueError:
                raise TypeError("'timeout' must be number")

        resource = Queue()

        def callback(req, res):
            resource.put(res)

        request = self.registerResource(namespace, name, lockType, callback)
        request.wait(timeout)
        if not request.granted():
            try:
                request.cancel()
                raise RequestTimedOutError("Request timed out. Could not "
                                           "acquire resource '%s.%s'" %
                                           (namespace, name))
            except RequestAlreadyProcessedError:
                # We might have acquired the resource between 'wait' and
                # 'cancel'
                if request.canceled():
                    raise se.ResourceAcqusitionFailed()

        return resource.get()
Code example #25
File: test_remote.py Project: w4jda/pytest-xdist
class WorkerSetup:
    use_callback = False

    def __init__(self, request, testdir):
        self.request = request
        self.testdir = testdir
        self.events = Queue()

    def setup(self,):
        self.testdir.chdir()
        # import os ; os.environ['EXECNET_DEBUG'] = "2"
        self.gateway = execnet.makegateway()
        self.config = config = self.testdir.parseconfigure()
        putevent = self.use_callback and self.events.put or None

        class DummyMananger:
            specs = [0, 1]

        self.slp = WorkerController(DummyMananger, self.gateway, config, putevent)
        self.request.addfinalizer(self.slp.ensure_teardown)
        self.slp.setup()

    def popevent(self, name=None):
        while 1:
            if self.use_callback:
                data = self.events.get(timeout=WAIT_TIMEOUT)
            else:
                data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
            ev = EventCall(data)
            if name is None or ev.name == name:
                return ev
            print("skipping %s" % (ev,))

    def sendcommand(self, name, **kwargs):
        self.slp.sendcommand(name, **kwargs)
Code example #26
def test_websocket_api():
    client = boto3.client('apigatewayv2', endpoint_url=TEST_APIGATEWAYV2_URL)
    queue = Queue()
    msg = {'action': 'test-action'}

    async def start_client(uri):
        async with websockets.connect(uri) as websocket:
            print('Sending message to websocket')
            await websocket.send(json.dumps(msg))
            print('Waiting for response message from websocket ...')
            result = await websocket.recv()
            print('Received message from websocket: %s' % result)
            queue.put(json.loads(result))

    apis = client.get_apis()['Items']
    api = [a for a in apis if 'localstack-websockets' in a['Name']][0]

    url = api['ApiEndpoint']
    print('Connecting to websocket URL %s' % url)
    asyncio.get_event_loop().run_until_complete(start_client(url))
    result = queue.get(timeout=3)
    result_body = result['body']
    result_body = json.loads(result_body)

    assert result_body == msg
Code example #28
class Consumer(object):
    """
    A consumer is registered to the read thread before a command is issued.  If an output
    matches the consumer, the output will unblock the get() caller.
    """
    def __init__(self, command, cid):  # type: (CoreCommandSpec, int) -> None
        self.cid = cid
        self.command = command
        self._queue = Queue()  # type: Queue[Dict[str, Any]]

    def get_hash(self):  # type: () -> int
        """ Get an identification hash for this consumer. """
        return Toolbox.hash(CoreCommunicator.START_OF_REPLY +
                            bytearray([self.cid]) +
                            self.command.response_instruction)

    def consume(self, payload):  # type: (bytearray) -> None
        """ Consume payload. """
        data = self.command.consume_response_payload(payload)
        self._queue.put(data)

    def get(self, timeout):  # type: (Union[T_co, int]) -> Dict[str, Any]
        """
        Wait until the Core replies or the timeout expires.

        :param timeout: timeout in seconds
        :returns: dict containing the output fields of the command
        """
        try:
            return self._queue.get(timeout=timeout)
        except Empty:
            raise CommunicationTimedOutException(
                'No Core data received in {0}s'.format(timeout))
Code example #29
def test_db(db_glob, num_thread):
    # Python 2.7's glob does not support recursive patterns such as ".../**",
    # so we walk the directory tree ourselves to collect all files.
    db = get_file_recursively(db_glob)

    # using python 3.5, a simple form:
    # db = glob(db_glob)

    print("{} contains {} items".format(db_glob, len(db)))

    index_queue = Queue()
    result_queue = Queue()
    workers = [Thread(target=worker, args=(db, index_queue, result_queue))
               for _ in range(num_thread)]
    for w in workers:
        w.start()

    t1 = time.time()
    for i in range(len(db)):
        index_queue.put(i)
    total_len = 0
    for i in range(len(db)):
        total_len += result_queue.get()
    t2 = time.time()

    for i in range(len(workers)):
        index_queue.put(None)

    print("{} processing {} items with {} threads uses {}s, avg {}/s, avg {}B/s, avg {}MiB/s".format(
        db_glob, len(db), num_thread, (t2 - t1),
        len(db) / (t2 - t1), total_len / (t2 - t1), total_len / (t2 - t1) / 1024 / 1024))
Code example #30
File: batch_iterator.py Project: afcarl/dl-papers
class BufferedIterator(six.Iterator):
    def __init__(self, source, buffer_size=2):
        assert buffer_size >= 2, "minimum buffer size is 2"

        # The effective buffer size is one larger, because the generation
        # process will generate one extra element and block until there is room
        # in the buffer.
        self.buffer = Queue(maxsize=buffer_size - 1)

        def populate_buffer():
            try:
                for item in source:
                    self.buffer.put((None, item))
            except:
                self.buffer.put((sys.exc_info(), None))
            else:
                self.buffer.put(DONE)

        thread = threading.Thread(target=populate_buffer)
        thread.daemon = True
        thread.start()

    def __iter__(self):
        return self

    def __next__(self):
        value = self.buffer.get()
        if value is DONE:
            raise StopIteration()

        exc_info, data = value
        if exc_info:
            six.reraise(*exc_info)
        return data
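
The notable detail in BufferedIterator is that an exception raised inside the producer thread is shipped across the queue as sys.exc_info() and re-raised in the consumer. Here is a self-contained Python 3 sketch of that error-propagation idea, without six; the names (buffered, _DONE, shaky) are illustrative only.

import sys
from queue import Queue
from threading import Thread

_DONE = object()  # sentinel marking normal completion

def buffered(source, maxsize=2):
    """Yield items from source, produced in a background thread."""
    buffer = Queue(maxsize=maxsize)

    def fill():
        try:
            for item in source:
                buffer.put((None, item))
        except BaseException:
            buffer.put((sys.exc_info(), None))
        else:
            buffer.put(_DONE)

    Thread(target=fill, daemon=True).start()

    while True:
        value = buffer.get()
        if value is _DONE:
            return
        exc_info, item = value
        if exc_info:
            # Re-raise the producer's exception with its original traceback.
            raise exc_info[1].with_traceback(exc_info[2])
        yield item

if __name__ == '__main__':
    def shaky():
        yield 1
        yield 2
        raise ValueError("boom")

    it = buffered(shaky())
    print(next(it), next(it))  # 1 2
    try:
        next(it)
    except ValueError as exc:
        print("re-raised:", exc)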
Code example #31
class Scheduler(object):
    def __init__(self):
        self.queue = Queue()
        self._filter_set = set()


    def add_request(self, request):
        if self._filter_request(request):
            self._filter_set.add(request.url)
            self.queue.put(request)


    def get_request(self):
        try:
            return self.queue.get(False)
        except:
            return False


    def _filter_request(self, request):
        """
            Deduplicate the request and return the result of the check.
        """
        # If the request URL is not already in the dedup set, return True,
        # meaning it may be added to the request queue.
        if request.url not in self._filter_set:
            return True
        else:
            # Otherwise it is a duplicate and must not be added.
            return False
Code example #33
class Scheduler():
    '''Encapsulation of the scheduler module'''
    def __init__(self):
        self.queue = Queue()

    def add_request(self, request):
        '''
        Add a request to the queue
        :param request: request object
        :return: None
        '''
        # URL deduplication
        # self._filter_request(request)
        self.queue.put(request)

    def get_request(self):
        '''
        Get a request object from the queue
        :return: request object
        '''
        return self.queue.get()

    def _filter_request(self, request):
        '''
        Deduplicate request objects
        :param request: request object
        :return: bool
        '''
        pass
Code example #34
File: __init__.py Project: jaxon1/dronekit-sitl
class NonBlockingStreamReader:
    def __init__(self, stream):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        '''

        self._s = stream
        self._q = Queue()

        def _populateQueue(stream, queue):
            '''
            Collect lines from 'stream' and put them in 'queue'.
            '''

            while True:
                line = stream.readline()
                if line:
                    queue.put(line)
                else:
                    break

        self._t = Thread(target = _populateQueue,
                         args = (self._s, self._q))
        self._t.daemon = True
        self._t.start() #start collecting lines from the stream

    def readline(self, timeout = None):
        try:
            return self._q.get(block = timeout is not None,
                               timeout = timeout)
        except Empty:
            return None
Code example #36
    def iter_entries(handle):

        cd = pycdlib.PyCdlib()

        if hasattr(handle, 'seek') and handle.seekable():
            handle.seek(0)
            cd.open_fp(handle)
        else:
            cd.open(handle)

        rock_ridge = cd.rock_ridge is not None
        joliet = cd.joliet_vd is not None
        joliet_only = joliet and not rock_ridge

        directories = Queue()
        directories.put(cd.get_entry('/', joliet_only))

        while not directories.empty():
            directory = directories.get()

            for child in directory.children:
                if not child.is_dot() and not child.is_dotdot():
                    if child.is_dir():
                        directories.put(child)
                    yield child
Code example #37
File: __init__.py Project: abhiTronix/dronekit-sitl
class NonBlockingStreamReader:
    def __init__(self, stream):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        '''

        self._s = stream
        self._q = Queue()

        def _populateQueue(stream, queue):
            '''
            Collect lines from 'stream' and put them in 'queue'.
            '''

            while True:
                line = stream.readline()
                if line:
                    queue.put(line)
                else:
                    break

        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()  #start collecting lines from the stream

    def readline(self, timeout=None):
        try:
            return self._q.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
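
Both NonBlockingStreamReader variants wrap a blocking readline() in a daemon thread plus a queue. The sketch below applies the same idea to a child process's stdout; it is illustrative only and uses made-up names rather than code from either project.

import subprocess
import sys
from queue import Queue, Empty
from threading import Thread

def nonblocking_lines(stream):
    """Return a function that fetches the next line, or None on timeout."""
    q = Queue()

    def pump():
        # Blocking reads happen here, off the main thread.
        for line in iter(stream.readline, b''):
            q.put(line)

    Thread(target=pump, daemon=True).start()

    def readline(timeout=None):
        try:
            return q.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None

    return readline

if __name__ == '__main__':
    proc = subprocess.Popen(
        [sys.executable, '-c', 'print("hello"); print("world")'],
        stdout=subprocess.PIPE)
    readline = nonblocking_lines(proc.stdout)
    print(readline(timeout=2))  # first line from the child process
    print(readline(timeout=2))  # second line
    proc.wait()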
Code example #38
File: client.py Project: teloniusz/zeus
def main_random_cast(voter_url_file, plaintexts_file, nr_threads=2):
    if exists(plaintexts_file):
        m = "%s: file exists, will not overwrite" % (plaintexts_file, )
        raise ValueError(m)

    with open(voter_url_file) as f:
        voter_urls = f.read().splitlines()
    total = len(voter_urls)
    inqueue = Queue(maxsize=total)
    outqueue = Queue(maxsize=total)
    for i, voter_url in enumerate(voter_urls):
        inqueue.put((i, total, voter_url))

    #main_random_cast_thread(queue)
    threads = [
        Thread(target=main_random_cast_thread, args=(inqueue, outqueue))
        for _ in range(nr_threads)
    ]

    for t in threads:
        t.daemon = True
        t.start()

    plaintexts = [outqueue.get() for _ in range(total)]
    with open(plaintexts_file, 'w') as f:
        f.write(repr(plaintexts))

    for t in threads:
        t.join()
Code example #39
File: parallel.py Project: asiqq23/compose
def parallel_execute_stream(objects, func, get_deps):
    if get_deps is None:
        get_deps = _no_deps

    results = Queue()
    state = State(objects)

    while not state.is_done():
        for event in feed_queue(objects, func, get_deps, results, state):
            yield event

        try:
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event
Code example #41
File: evaluation.py Project: mathics/Mathics
def run_with_timeout_and_stack(request, timeout):
    '''
    interrupts evaluation after a given time period. provides a suitable stack environment.
    '''

    # only use set_thread_stack_size if max recursion depth was changed via the environment variable
    # MATHICS_MAX_RECURSION_DEPTH. if it is set, we always use a thread, even if timeout is None, in
    # order to be able to set the thread stack size.

    if MAX_RECURSION_DEPTH > settings.DEFAULT_MAX_RECURSION_DEPTH:
        set_thread_stack_size(python_stack_size(MAX_RECURSION_DEPTH))
    elif timeout is None:
        return request()

    queue = Queue(maxsize=1)   # stores the result or exception
    thread = Thread(target=_thread_target, args=(request, queue))
    thread.start()

    thread.join(timeout)
    if thread.is_alive():
        raise TimeoutInterrupt()

    success, result = queue.get()
    if success:
        return result
    else:
        six.reraise(*result)
Code example #43
File: TagGroup.py Project: esitarski/CrossMgr
class TagGroup( object ):
	'''
		Process groups of tag reads and return the best time estimated using quadratic regression.
		Stray reads are also detected if there is no quiet period for the tag.
		The first read time of each stray read is returned.
	'''
	def __init__( self ):
		self.q = Queue()
		self.tagInfo = {}
		
	def add( self, antenna, tag, t, db ):
		self.q.put((antenna, tag, t, db))

	def flush( self ):
		# Process all waiting reads.
		while 1:
			try:
				antenna, tag, t, db = self.q.get(False)
			except Empty:
				break
			try:
				self.tagInfo[tag].add( antenna, t, db )
			except KeyError:
				self.tagInfo[tag] = TagGroupEntry( antenna, t, db )
			self.q.task_done()
			
	def getReadsStrays( self, tNow=None, method=QuadraticRegressionMethod, antennaChoice=MostReadsChoice, removeOutliers=True ):
		'''
			Returns two lists:
				reads = [(tag1, t1, sampleSize1, antennaID1), (tag2, t2, sampleSize2, antennaID2), ...]
				strays = [(tagA, tFirstReadA), (tagB, tFirstReadB), ...]
				
			Each stray will be reported as a read the first time it is detected.
		'''
		self.flush()
		
		trNow = datetimeToTr( tNow or datetime.now() )
		reads, strays = [], []
		toDelete = []
		
		for tag, tge in six.iteritems(self.tagInfo):
			if trNow - tge.lastReadMax >= tQuiet:				# Tag has left read range.
				if not tge.isStray:
					t, sampleSize, antennaID = tge.getBestEstimate(method, antennaChoice, removeOutliers)
					reads.append( (tag, t, sampleSize, antennaID) )
				toDelete.append( tag )
			elif tge.lastReadMax - tge.firstReadMin >= tStray:	# This is a stray.
				t = trToDatetime( tge.firstReadMin )
				if not tge.isStray:
					tge.setStray()
					reads.append( (tag, t, 1, 0) )				# Report stray first read time.
				strays.append( (tag, t) )
				
		for tag in toDelete:
			del self.tagInfo[tag]
		
		reads.sort( key=operator.itemgetter(1,0))
		strays.sort( key=operator.itemgetter(1,0) )
		return reads, strays
Code example #44
File: commit_queue.py Project: HWL-RobAt/gitfs
class BaseQueue(object):
    def __init__(self):
        self.queue = Queue()

    def commit(self, *args, **kwargs):
        raise NotImplementedError()

    def get(self, *args, **kwargs):
        return self.queue.get(*args, **kwargs)
Code example #45
File: test_grader.py Project: edx/xqueue-watcher
    def test_response_on_queue(self):
        g = MockGrader()
        pl = self._make_payload({
            'student_response': 'blah',
            'grader_payload': json.dumps({
                'grader': 'correct'
                })
            })
        q = Queue()
        reply = g.process_item(pl, queue=q)
        popped = q.get()
        self.assertEqual(reply, popped)

        del pl['xqueue_body']
        try:
            g.process_item(pl, queue=q)
        except Exception as e:
            popped = q.get()
            self.assertEqual(e, popped)
Code example #46
File: utils.py Project: nhumrich/compose
def parallel_execute(objects, obj_callable, msg_index, msg):
    """
    For a given list of objects, call the callable passing in the first
    object we give it.
    """
    stream = get_output_stream(sys.stdout)
    lines = []
    errors = {}

    for obj in objects:
        write_out_msg(stream, lines, msg_index(obj), msg)

    q = Queue()

    def inner_execute_function(an_callable, parameter, msg_index):
        try:
            result = an_callable(parameter)
        except APIError as e:
            errors[msg_index] = e.explanation
            result = "error"
        except Exception as e:
            errors[msg_index] = e
            result = 'unexpected_exception'

        q.put((msg_index, result))

    for an_object in objects:
        t = Thread(
            target=inner_execute_function,
            args=(obj_callable, an_object, msg_index(an_object)),
        )
        t.daemon = True
        t.start()

    done = 0
    total_to_execute = len(objects)

    while done < total_to_execute:
        try:
            msg_index, result = q.get(timeout=1)

            if result == 'unexpected_exception':
                raise errors[msg_index]
            if result == 'error':
                write_out_msg(stream, lines, msg_index, msg, status='error')
            else:
                write_out_msg(stream, lines, msg_index, msg)
            done += 1
        except Empty:
            pass

    if errors:
        stream.write("\n")
        for error in errors:
            stream.write("ERROR: for {}  {} \n".format(error, errors[error]))
Code example #47
    def test_command_subprocess(self):
        """test_process | command_subprocess"""
        queue = Queue()
        self.assertRaises(
            SystemExit, shprocess.command_subprocess,
            queue,
            [sys.executable, "-c",
             "from __future__ import print_function;print('foo')"],
        )
        line = queue.get(block=True, timeout=.1)
        self.assertEqual(to_unicode("foo"), to_unicode(line).rstrip())
Code example #48
File: ftdi.py Project: nccgroup/umap2
class USBFtdiInterface(USBInterface):
    name = 'FtdiInterface'

    def __init__(self, app, phy, interface_number):
        super(USBFtdiInterface, self).__init__(
            app=app,
            phy=phy,
            interface_number=interface_number,
            interface_alternate=0,
            interface_class=USBClass.VendorSpecific,
            interface_subclass=0xff,
            interface_protocol=0xff,
            interface_string_index=0,
            endpoints=[
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=1,
                    direction=USBEndpoint.direction_out,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_data_available
                ),
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=3,
                    direction=USBEndpoint.direction_in,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_ep3_buffer_available  # at this point, we don't send data to the host
                )
            ],
        )
        self.txq = Queue()

    def handle_data_available(self, data):
        self.debug('received string (%d): %s' % (len(data), data))
        reply = b'\x01\x00' + data
        self.txq.put(reply)

    def handle_ep3_buffer_available(self):
        if not self.txq.empty():
            self.send_on_endpoint(3, self.txq.get())
Code example #49
File: py.py Project: mbodenhamer/syn
def hangwatch(timeout, func, *args, **kwargs):
    def target(queue):
        try:
            func(*args, **kwargs)
        except Exception as e:
            queue.put(sys.exc_info())
            queue.put(e)
            sys.exit()

    q = Queue()
    thread = threading.Thread(target=target, args = (q,))
    
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        raise RuntimeError('Operation did not terminate within {} seconds'
                           .format(timeout))

    if not q.empty():
        info = q.get(block=False)
        e = q.get(block=False)
        eprint(''.join(traceback.format_exception(*info)))
        raise e
Code example #50
    def test_changesize(self):
        "Change sizes and make sure pool doesn't work with no workers."
        pool = workerpool.WorkerPool(5)
        for i in range(5):
            pool.grow()
        self.assertEquals(pool.size(), 10)
        for i in range(10):
            pool.shrink()
        pool.wait()
        self.assertEquals(pool.size(), 0)

        # Make sure nothing is reading jobs anymore
        q = Queue()
        for i in range(5):
            pool.put(workerpool.SimpleJob(q, sum, [range(5)]))
        try:
            q.get(block=False)
        except Empty:
            pass  # Success
        else:
            assert False, "Something returned a result, even though we are"
            "expecting no workers."
        pool.shutdown()
Code example #51
File: parallel.py Project: docker/compose
def parallel_execute_iter(objects, func, get_deps, limit):
    """
    Runs func on objects in parallel while ensuring that func is
    ran on object only after it is ran on all its dependencies.

    Returns an iterator of tuples which look like:

    # if func returned normally when run on object
    (object, result, None)

    # if func raised an exception when run on object
    (object, None, exception)

    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps

    if limit is None:
        limiter = NoLimit()
    else:
        limiter = Semaphore(limit)

    results = Queue()
    state = State(objects)

    while True:
        feed_queue(objects, func, get_deps, results, state, limiter)

        try:
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if event is STOP:
            break

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event
Code example #52
File: call_python_client.py Project: mposa/drake
    def _handle_messages_threaded(self):
        # Handles messages in a threaded fashion.
        queue = Queue()

        def producer_loop():
            # Read messages from file, and queue them for execution.
            for msg in self._read_next_message():
                queue.put(msg)
                # Check if an error occurred.
                if self._done:
                    break
            # Wait until the queue empties out to signal completion from the
            # producer's side.
            if not self._done:
                queue.join()
                self._done = True

        producer = Thread(name="Producer", target=producer_loop)
        # @note Previously, when trying to do `queue.clear()` in the consumer,
        # and `queue.join()` in the producer, there would be intermittent
        # deadlocks. By demoting the producer to a daemon, I (eric.c) have not
        # yet encountered a deadlock.
        producer.daemon = True
        producer.start()

        # Consume.
        # TODO(eric.cousineau): Trying to quit via Ctrl+C is awkward (but kinda
        # works). Is there a way to have `plt.pause` handle Ctrl+C differently?
        try:
            pause = self.scope_globals['pause']
            while not self._done:
                # Process messages.
                while not queue.empty():
                    msg = queue.get()
                    queue.task_done()
                    self._execute_message(msg)
                # Spin busy for a bit, let matplotlib (or whatever) flush its
                # event queue.
                pause(0.01)
        except KeyboardInterrupt:
            # User pressed Ctrl+C.
            self._done = True
            print("Quitting")
        except Exception as e:
            # We encountered an error, and must stop.
            self._done = True
            self._had_error = True
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("  Stopping (--stop_on_error)\n")
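
The producer in _handle_messages_threaded relies on the Queue.join()/task_done() pair to learn when every queued message has actually been executed by the consumer. A minimal, self-contained illustration of that completion tracking:

from queue import Queue
from threading import Thread

q = Queue()

def consumer():
    while True:
        msg = q.get()
        print('processed', msg)
        q.task_done()  # one task_done() per successful get()

Thread(target=consumer, daemon=True).start()

for i in range(3):
    q.put(i)

# Blocks until task_done() has been called for every item put above.
q.join()
print('all messages handled')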
Code example #53
File: etcd.py Project: zalando/patroni
class DnsCachingResolver(Thread):

    def __init__(self, cache_time=600.0, cache_fail_time=30.0):
        super(DnsCachingResolver, self).__init__()
        self._cache = {}
        self._cache_time = cache_time
        self._cache_fail_time = cache_fail_time
        self._resolve_queue = Queue()
        self.daemon = True
        self.start()

    def run(self):
        while True:
            hostname, attempt = self._resolve_queue.get()
            ips = self._do_resolve(hostname)
            if ips:
                self._cache[hostname] = (time.time(), ips)
            else:
                if attempt < 10:
                    self.resolve_async(hostname, attempt + 1)
                    time.sleep(1)

    def resolve(self, hostname):
        current_time = time.time()
        cached_time, ips = self._cache.get(hostname, (0, []))
        time_passed = current_time - cached_time
        if time_passed > self._cache_time or (not ips and time_passed > self._cache_fail_time):
            new_ips = self._do_resolve(hostname)
            if new_ips:
                self._cache[hostname] = (current_time, new_ips)
                ips = new_ips
        return ips

    def resolve_async(self, hostname, attempt=0):
        self._resolve_queue.put((hostname, attempt))

    @staticmethod
    def _do_resolve(hostname):
        try:
            ret = set()
            for r in socket.getaddrinfo(hostname, 0, 0, 0, socket.IPPROTO_TCP):
                if r[0] == socket.AF_INET6:
                    ret.add('[{0}]'.format(r[4][0]))
                else:
                    ret.add(r[4][0])
            return list(ret)
        except socket.gaierror:
            logger.warning('failed to resolve host %s', hostname)
            return []
Code example #54
File: pools.py Project: davanstrien/workerpool
    def map(self, fn, *seq):
        """Perform a map operation distributed among the workers.

        Blocks until done.
        """
        results = Queue()
        args = list(zip(*seq))
        for arg_tuple in args:
            j = SimpleJob(results, fn, arg_tuple)
            self.put(j)

        # Aggregate results
        r = []
        for _ in range(len(args)):
            r.append(results.get())

        return r
Code example #55
File: parallel.py Project: 2k0ri/compose
def parallel_execute(objects, func, index_func, msg):
    """For a given list of objects, call the callable passing in the first
    object we give it.
    """
    objects = list(objects)
    stream = get_output_stream(sys.stderr)
    writer = ParallelStreamWriter(stream, msg)

    for obj in objects:
        writer.initialize(index_func(obj))

    q = Queue()

    # TODO: limit the number of threads #1828
    for obj in objects:
        t = Thread(
            target=perform_operation,
            args=(func, obj, q.put, index_func(obj)))
        t.daemon = True
        t.start()

    done = 0
    errors = {}

    while done < len(objects):
        try:
            msg_index, result = q.get(timeout=1)
        except Empty:
            continue

        if isinstance(result, APIError):
            errors[msg_index] = "error", result.explanation
            writer.write(msg_index, 'error')
        elif isinstance(result, Exception):
            errors[msg_index] = "unexpected_exception", result
        else:
            writer.write(msg_index, 'done')
        done += 1

    if not errors:
        return

    stream.write("\n")
    for msg_index, (result, error) in errors.items():
        stream.write("ERROR: for {}  {} \n".format(msg_index, error))
        if result == 'unexpected_exception':
            raise error
Code example #56
File: audio.py Project: nccgroup/umap2
class AudioStreaming(object):

    def __init__(self, app, phy, tx_ep, rx_ep):
        self.app = app
        self.phy = phy
        self.tx_ep = tx_ep
        self.rx_ep = rx_ep
        self.txq = Queue()

    def buffer_available(self):
        if self.txq.empty():
            self.phy.send_on_endpoint(self.tx_ep, b'\x00\x00\x00\x00\x00\x00\x00\x00')
        else:
            self.phy.send_on_endpoint(self.tx_ep, self.txq.get())

    def data_available(self, data):
        self.app.logger.info('[AudioStreaming] Got %#x bytes on streaming endpoint' % (len(data)))
Code example #57
File: destroyhandler.py Project: 12190143/Theano
def _build_droot_impact(destroy_handler):
    droot = {}   # destroyed view + nonview variables -> foundation
    impact = {}  # destroyed nonview variable -> it + all views of it
    root_destroyer = {}  # root -> destroyer apply

    for app in destroy_handler.destroyers:
        for output_idx, input_idx_list in app.op.destroy_map.items():
            if len(input_idx_list) != 1:
                raise NotImplementedError()
            input_idx = input_idx_list[0]
            input = app.inputs[input_idx]

            # Find non-view variable which is ultimately viewed by input.
            view_i = destroy_handler.view_i
            _r = input
            while _r is not None:
                r = _r
                _r = view_i.get(r)
            input_root = r

            if input_root in droot:
                raise InconsistencyError(
                    "Multiple destroyers of %s" % input_root)
            droot[input_root] = input_root
            root_destroyer[input_root] = app

            # The code here add all the variables that are views of r into
            # an OrderedSet input_impact
            input_impact = OrderedSet()
            queue = Queue()
            queue.put(input_root)
            while not queue.empty():
                v = queue.get()
                for n in destroy_handler.view_o.get(v, []):
                    input_impact.add(n)
                    queue.put(n)

            for v in input_impact:
                assert v not in droot
                droot[v] = input_root

            impact[input_root] = input_impact
            impact[input_root].add(input_root)

    return droot, impact, root_destroyer
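
The inner loop that seeds a Queue with input_root and keeps pulling until it is empty is a plain breadth-first traversal of the view graph. The same traversal over an ordinary adjacency dict, as a self-contained sketch (the Theano code can omit the visited-set check because its view graph has no cycles):

from queue import Queue

def reachable_from(root, children):
    """Collect every node reachable from root via the children mapping."""
    seen = set()
    queue = Queue()
    queue.put(root)
    while not queue.empty():
        node = queue.get()
        for child in children.get(node, []):
            if child not in seen:
                seen.add(child)
                queue.put(child)
    return seen

if __name__ == '__main__':
    graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
    print(reachable_from('a', graph))  # {'b', 'c', 'd'}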
Code example #58
File: event_queue.py Project: pymedusa/SickRage
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        # http://stackoverflow.com/a/20598791
        self.daemon = False
        self.callback = callback
        self.name = 'EVENT-QUEUE'
        self.stop = threading.Event()

    def put(self, event_type):
        self.queue.put(event_type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    event_type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(event_type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    event_type = None

            # exiting thread
            self.stop.clear()
        except Exception as error:
            log.error(u'Exception generated in thread %s: %s',
                      self.name, ex(error))
            log.debug(repr(traceback.format_exc()))

    # System Events
    class SystemEvent(Event):
        RESTART = 'RESTART'
        SHUTDOWN = 'SHUTDOWN'
Code example #59
File: evaluation.py Project: EgoIncarnate/Mathics
def run_with_timeout(request, timeout):
    '''
    interrupts evaluation after a given time period.
    '''
    if timeout is None:
        return request()

    queue = Queue(maxsize=1)   # stores the result or exception
    thread = Thread(target=_thread_target, args=(request, queue))
    thread.start()

    thread.join(timeout)
    if thread.is_alive():
        raise TimeoutInterrupt()

    success, result = queue.get()
    if success:
        return result
    else:
        six.reraise(*result)
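
Both timeout helpers above call a _thread_target function that is not shown. Judging only from how the queue is consumed - a (success, result) pair where result is either the return value or sys.exc_info() - a plausible, purely hypothetical implementation would be:

import sys

def _thread_target(request, queue):
    """Hypothetical worker body matching the (success, result) protocol above."""
    try:
        result = request()
    except BaseException:
        queue.put((False, sys.exc_info()))
    else:
        queue.put((True, result))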